You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

3736 lines
166 KiB

  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import itertools
  4. import json
  5. import os.path
  6. import random
  7. import re
  8. import time
  9. import traceback
  10. from .common import InfoExtractor, SearchInfoExtractor
  11. from ..jsinterp import JSInterpreter
  12. from ..swfinterp import SWFInterpreter
  13. from ..compat import (
  14. compat_chr,
  15. compat_kwargs,
  16. compat_parse_qs,
  17. compat_urllib_parse_unquote,
  18. compat_urllib_parse_unquote_plus,
  19. compat_urllib_parse_urlencode,
  20. compat_urllib_parse_urlparse,
  21. compat_urlparse,
  22. compat_str,
  23. )
  24. from ..utils import (
  25. bool_or_none,
  26. clean_html,
  27. error_to_compat_str,
  28. ExtractorError,
  29. float_or_none,
  30. get_element_by_id,
  31. int_or_none,
  32. mimetype2ext,
  33. parse_codecs,
  34. parse_count,
  35. parse_duration,
  36. remove_quotes,
  37. remove_start,
  38. smuggle_url,
  39. str_or_none,
  40. str_to_int,
  41. try_get,
  42. unescapeHTML,
  43. unified_strdate,
  44. unsmuggle_url,
  45. update_url_query,
  46. uppercase_escape,
  47. url_or_none,
  48. urlencode_postdata,
  49. urljoin,
  50. )
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""

    # Google account sign-in endpoints used by _login().
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
    _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
    _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
    # {0} is filled with the "TL" token extracted from the challenge response.
    _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'

    # Path segments that can never be a channel/user name; used by sibling
    # extractors to disambiguate URLs.
    _RESERVED_NAMES = (
        r'course|embed|channel|c|user|playlist|watch|w|results|storefront|oops|'
        r'shared|index|account|reporthistory|t/terms|about|upload|signin|logout|'
        r'feed/(watch_later|history|subscriptions|library|trending|recommended)')

    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    # Matches playlist IDs (prefixed forms plus the special RDMM/WL/LL/LM lists).
    _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'

    # Headers identifying requests as coming from the desktop web client.
    _YOUTUBE_CLIENT_HEADERS = {
        'x-youtube-client-name': '1',
        'x-youtube-client-version': '1.20200609.04.02',
    }

    def _set_language(self):
        """Set the PREF cookie so YouTube serves English pages."""
        self._set_cookie(
            '.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
            # YouTube sets the expire time to about two months
            expire_time=time.time() + 2 * 30 * 24 * 3600)

    def _ids_to_results(self, ids):
        """Wrap each video ID in *ids* as a 'Youtube' url_result entry."""
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        username, password = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            # NOTE(review): the 'and False' makes this branch unreachable on
            # purpose; see the inline TODO below before removing it.
            if self._downloader.params.get('cookiefile') and False:  # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them.
                self.to_screen('[Cookies] Reminder - Make sure to always use up to date cookies!')
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            return

        login_form = self._hidden_inputs(login_page)

        def req(url, f_req, note, errnote):
            # POST a Google sign-in RPC: the hidden form fields plus the
            # positional JSON payload f_req; responses carry an anti-XSSI
            # prefix that is stripped via transform_source.
            data = login_form.copy()
            data.update({
                'pstMsg': 1,
                'checkConnection': 'youtube',
                'checkedDomains': 'youtube',
                'hl': 'en',
                'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
                'f.req': json.dumps(f_req),
                'flowName': 'GlifWebSignIn',
                'flowEntry': 'ServiceLogin',
                # TODO: reverse actual botguard identifier generation algo
                'bgRequest': '["identifier",""]',
            })
            return self._download_json(
                url, None, note=note, errnote=errnote,
                transform_source=lambda s: re.sub(r'^[^[]*', '', s),
                fatal=False,
                data=urlencode_postdata(data), headers={
                    'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
                    'Google-Accounts-XSRF': 1,
                })

        def warn(message):
            self._downloader.report_warning(message)

        # Step 1: look up the account for *username*. The payload layout is
        # positional and reverse-engineered; do not reorder elements.
        lookup_req = [
            username,
            None, [], None, 'US', None, None, 2, False, True,
            [
                None, None,
                [2, 1, None, 1,
                 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
                 None, [], 4],
                1, [None, None, []], None, None, None, True
            ],
            username,
        ]

        lookup_results = req(
            self._LOOKUP_URL, lookup_req,
            'Looking up account info', 'Unable to look up account info')

        if lookup_results is False:
            return False

        user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
        if not user_hash:
            warn('Unable to extract user hash')
            return False

        # Step 2: submit the password for the resolved account hash.
        challenge_req = [
            user_hash,
            None, 1, None, [1, None, None, None, [password, None, True]],
            [
                None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
                1, [None, None, []], None, None, None, True
            ]]

        challenge_results = req(
            self._CHALLENGE_URL, challenge_req,
            'Logging in', 'Unable to log in')

        if challenge_results is False:
            return

        login_res = try_get(challenge_results, lambda x: x[0][5], list)
        if login_res:
            login_msg = try_get(login_res, lambda x: x[5], compat_str)
            # NOTE(review): '%' binds tighter than the conditional, so the
            # 'Unable to login: %s' prefix is only applied in the
            # INCORRECT_ANSWER_ENTERED case — confirm this is intended.
            warn(
                'Unable to login: %s' % 'Invalid password'
                if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
            return False

        res = try_get(challenge_results, lambda x: x[0][-1], list)
        if not res:
            warn('Unable to extract result entry')
            return False

        login_challenge = try_get(res, lambda x: x[0][0], list)
        if login_challenge:
            challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
            if challenge_str == 'TWO_STEP_VERIFICATION':
                # SEND_SUCCESS - TFA code has been successfully sent to phone
                # QUOTA_EXCEEDED - reached the limit of TFA codes
                status = try_get(login_challenge, lambda x: x[5], compat_str)
                if status == 'QUOTA_EXCEEDED':
                    warn('Exceeded the limit of TFA codes, try later')
                    return False

                tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
                if not tl:
                    warn('Unable to extract TL')
                    return False

                tfa_code = self._get_tfa_info('2-step verification code')

                if not tfa_code:
                    warn(
                        'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
                        '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                    return False

                # Accept codes pasted with the 'G-' prefix Google uses in SMS.
                tfa_code = remove_start(tfa_code, 'G-')

                # Step 3 (optional): submit the TFA code.
                tfa_req = [
                    user_hash, None, 2, None,
                    [
                        9, None, None, None, None, None, None, None,
                        [None, tfa_code, True, 2]
                    ]]

                tfa_results = req(
                    self._TFA_URL.format(tl), tfa_req,
                    'Submitting TFA code', 'Unable to submit TFA code')

                if tfa_results is False:
                    return False

                tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
                if tfa_res:
                    tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
                    # NOTE(review): same precedence quirk as the login warning
                    # above — the prefix only applies to the first branch.
                    warn(
                        'Unable to finish TFA: %s' % 'Invalid TFA code'
                        if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
                    return False

                check_cookie_url = try_get(
                    tfa_results, lambda x: x[0][-1][2], compat_str)
            else:
                # Any other challenge cannot be solved here; tell the user to
                # resolve it in a browser.
                CHALLENGES = {
                    'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
                    'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
                    'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
                }
                challenge = CHALLENGES.get(
                    challenge_str,
                    '%s returned error %s.' % (self.IE_NAME, challenge_str))
                warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
                return False
        else:
            check_cookie_url = try_get(res, lambda x: x[2], compat_str)

        if not check_cookie_url:
            warn('Unable to extract CheckCookie URL')
            return False

        # Final step: fetch the CheckCookie URL and verify the session landed
        # on a logged-in account page.
        check_cookie_results = self._download_webpage(
            check_cookie_url, None, 'Checking cookie', fatal=False)

        if check_cookie_results is False:
            return False

        if 'https://myaccount.google.com/' not in check_cookie_results:
            warn('Unable to log in')
            return False

        return True

    def _download_webpage_handle(self, *args, **kwargs):
        # Ensure a fresh, mutable copy of the query dict is passed through so
        # callers' dicts are never mutated downstream.
        query = kwargs.get('query', {}).copy()
        kwargs['query'] = query
        return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
            *args, **compat_kwargs(kwargs))

    def _get_yt_initial_data(self, video_id, webpage):
        """Extract and parse the ytInitialData JSON blob from *webpage*.

        Returns the parsed dict, or None if the blob is absent or unparsable
        (both the regex search and the JSON parse are non-fatal).
        """
        config = self._search_regex(
            (r'window\["ytInitialData"\]\s*=\s*(.*?)(?<=});',
             r'var\s+ytInitialData\s*=\s*(.*?)(?<=});'),
            webpage, 'ytInitialData', default=None)
        if config:
            return self._parse_json(
                uppercase_escape(config), video_id, fatal=False)

    def _real_initialize(self):
        # No downloader means we cannot set cookies or fetch pages.
        if self._downloader is None:
            return
        self._set_language()
        if not self._login():
            return

    # Base payload for InnerTube API calls; merged with per-call queries in
    # _call_api().
    _DEFAULT_API_DATA = {
        'context': {
            'client': {
                'clientName': 'WEB',
                'clientVersion': '2.20201021.03.00',
            }
        },
    }

    # Matches the ytInitialData assignment in watch-page HTML, capturing the
    # JSON object.
    _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'

    def _call_api(self, ep, query, video_id):
        """POST *query* (merged over _DEFAULT_API_DATA) to the InnerTube
        endpoint *ep* and return the decoded JSON response.

        The 'key' query parameter is the public web-client InnerTube API key.
        """
        data = self._DEFAULT_API_DATA.copy()
        data.update(query)

        response = self._download_json(
            'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
            note='Downloading API JSON', errnote='Unable to download API page',
            data=json.dumps(data).encode('utf8'),
            headers={'content-type': 'application/json'},
            query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})

        return response

    def _extract_yt_initial_data(self, video_id, webpage):
        """Like _get_yt_initial_data(), but fatal: raises via _search_regex /
        _parse_json when the blob is missing or invalid."""
        return self._parse_json(
            self._search_regex(
                # Prefer a match terminated by a newline, then fall back to
                # the bare pattern.
                (r'%s\s*\n' % self._YT_INITIAL_DATA_RE,
                 self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
            video_id)
  278. class YoutubeIE(YoutubeBaseInfoExtractor):
  279. IE_DESC = 'YouTube.com'
  280. _VALID_URL = r"""(?x)^
  281. (
  282. (?:https?://|//) # http(s):// or protocol-independent URL
  283. (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
  284. (?:www\.)?deturl\.com/www\.youtube\.com/|
  285. (?:www\.)?pwnyoutube\.com/|
  286. (?:www\.)?hooktube\.com/|
  287. (?:www\.)?yourepeat\.com/|
  288. tube\.majestyc\.net/|
  289. # Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
  290. (?:(?:www|dev)\.)?invidio\.us/|
  291. (?:(?:www|no)\.)?invidiou\.sh/|
  292. (?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
  293. (?:www\.)?invidious\.kabi\.tk/|
  294. (?:www\.)?invidious\.13ad\.de/|
  295. (?:www\.)?invidious\.mastodon\.host/|
  296. (?:www\.)?invidious\.nixnet\.xyz/|
  297. (?:www\.)?invidious\.drycat\.fr/|
  298. (?:www\.)?tube\.poal\.co/|
  299. (?:www\.)?vid\.wxzm\.sx/|
  300. (?:www\.)?yewtu\.be/|
  301. (?:www\.)?yt\.elukerio\.org/|
  302. (?:www\.)?yt\.lelux\.fi/|
  303. (?:www\.)?invidious\.ggc-project\.de/|
  304. (?:www\.)?yt\.maisputain\.ovh/|
  305. (?:www\.)?invidious\.13ad\.de/|
  306. (?:www\.)?invidious\.toot\.koeln/|
  307. (?:www\.)?invidious\.fdn\.fr/|
  308. (?:www\.)?watch\.nettohikari\.com/|
  309. (?:www\.)?kgg2m7yk5aybusll\.onion/|
  310. (?:www\.)?qklhadlycap4cnod\.onion/|
  311. (?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
  312. (?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
  313. (?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
  314. (?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
  315. (?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
  316. (?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
  317. youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
  318. (?:.*?\#/)? # handle anchor (#/) redirect urls
  319. (?: # the various things that can precede the ID:
  320. (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
  321. |(?: # or the v= param in all its forms
  322. (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
  323. (?:\?|\#!?) # the params delimiter ? or # or #!
  324. (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
  325. v=
  326. )
  327. ))
  328. |(?:
  329. youtu\.be| # just youtu.be/xxxx
  330. vid\.plus| # or vid.plus/xxxx
  331. zwearz\.com/watch| # or zwearz.com/watch/xxxx
  332. )/
  333. |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
  334. )
  335. )? # all until now is optional -> you can pass the naked ID
  336. (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
  337. (?!.*?\blist=
  338. (?:
  339. %(playlist_id)s| # combined list/video URLs are handled by the playlist IE
  340. WL # WL are handled by the watch later IE
  341. )
  342. )
  343. (?(1).+)? # if we found the ID, everything can follow
  344. $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
  345. _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
  346. _PLAYER_INFO_RE = (
  347. r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
  348. r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
  349. )
  350. _formats = {
  351. '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
  352. '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
  353. '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
  354. '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
  355. '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
  356. '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
  357. '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  358. '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  359. # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
  360. '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
  361. '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
  362. '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
  363. '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
  364. '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
  365. '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
  366. '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
  367. '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  368. '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  369. # 3D videos
  370. '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
  371. '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
  372. '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
  373. '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
  374. '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
  375. '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
  376. '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
  377. # Apple HTTP Live Streaming
  378. '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
  379. '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
  380. '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
  381. '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
  382. '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
  383. '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
  384. '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
  385. '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
  386. # DASH mp4 video
  387. '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
  388. '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
  389. '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
  390. '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
  391. '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
  392. '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
  393. '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
  394. '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
  395. '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
  396. '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
  397. '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
  398. '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
  399. # Dash mp4 audio
  400. '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
  401. '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
  402. '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
  403. '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
  404. '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
  405. '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
  406. '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
  407. # Dash webm
  408. '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  409. '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  410. '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  411. '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  412. '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  413. '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  414. '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
  415. '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  416. '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  417. '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  418. '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  419. '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  420. '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  421. '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  422. '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  423. # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
  424. '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  425. '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  426. '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  427. '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  428. '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  429. '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  430. # Dash webm audio
  431. '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
  432. '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
  433. # Dash webm audio with opus inside
  434. '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
  435. '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
  436. '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
  437. # RTMP (unnamed)
  438. '_rtmp': {'protocol': 'rtmp'},
  439. # av01 video only formats sometimes served with "unknown" codecs
  440. '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  441. '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  442. '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  443. '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  444. }
  445. _SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt') # TODO 'json3' raising issues with automatic captions
  446. _GEO_BYPASS = False
  447. IE_NAME = 'youtube'
  448. _TESTS = [
  449. {
  450. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
  451. 'info_dict': {
  452. 'id': 'BaW_jenozKc',
  453. 'ext': 'mp4',
  454. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  455. 'uploader': 'Philipp Hagemeister',
  456. 'uploader_id': 'phihag',
  457. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
  458. 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
  459. 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
  460. 'upload_date': '20121002',
  461. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  462. 'categories': ['Science & Technology'],
  463. 'tags': ['youtube-dl'],
  464. 'duration': 10,
  465. 'view_count': int,
  466. 'like_count': int,
  467. 'dislike_count': int,
  468. 'start_time': 1,
  469. 'end_time': 9,
  470. }
  471. },
  472. {
  473. 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
  474. 'note': 'Embed-only video (#1746)',
  475. 'info_dict': {
  476. 'id': 'yZIXLfi8CZQ',
  477. 'ext': 'mp4',
  478. 'upload_date': '20120608',
  479. 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
  480. 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
  481. 'uploader': 'SET India',
  482. 'uploader_id': 'setindia',
  483. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
  484. 'age_limit': 18,
  485. }
  486. },
  487. {
  488. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
  489. 'note': 'Use the first video ID in the URL',
  490. 'info_dict': {
  491. 'id': 'BaW_jenozKc',
  492. 'ext': 'mp4',
  493. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  494. 'uploader': 'Philipp Hagemeister',
  495. 'uploader_id': 'phihag',
  496. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
  497. 'upload_date': '20121002',
  498. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  499. 'categories': ['Science & Technology'],
  500. 'tags': ['youtube-dl'],
  501. 'duration': 10,
  502. 'view_count': int,
  503. 'like_count': int,
  504. 'dislike_count': int,
  505. },
  506. 'params': {
  507. 'skip_download': True,
  508. },
  509. },
  510. {
  511. 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
  512. 'note': '256k DASH audio (format 141) via DASH manifest',
  513. 'info_dict': {
  514. 'id': 'a9LDPn-MO4I',
  515. 'ext': 'm4a',
  516. 'upload_date': '20121002',
  517. 'uploader_id': '8KVIDEO',
  518. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
  519. 'description': '',
  520. 'uploader': '8KVIDEO',
  521. 'title': 'UHDTV TEST 8K VIDEO.mp4'
  522. },
  523. 'params': {
  524. 'youtube_include_dash_manifest': True,
  525. 'format': '141',
  526. },
  527. 'skip': 'format 141 not served anymore',
  528. },
  529. # DASH manifest with encrypted signature
  530. {
  531. 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
  532. 'info_dict': {
  533. 'id': 'IB3lcPjvWLA',
  534. 'ext': 'm4a',
  535. 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
  536. 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
  537. 'duration': 244,
  538. 'uploader': 'AfrojackVEVO',
  539. 'uploader_id': 'AfrojackVEVO',
  540. 'upload_date': '20131011',
  541. },
  542. 'params': {
  543. 'youtube_include_dash_manifest': True,
  544. 'format': '141/bestaudio[ext=m4a]',
  545. },
  546. },
  547. # Controversy video
  548. {
  549. 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
  550. 'info_dict': {
  551. 'id': 'T4XJQO3qol8',
  552. 'ext': 'mp4',
  553. 'duration': 219,
  554. 'upload_date': '20100909',
  555. 'uploader': 'Amazing Atheist',
  556. 'uploader_id': 'TheAmazingAtheist',
  557. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
  558. 'title': 'Burning Everyone\'s Koran',
  559. 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
  560. }
  561. },
  562. # Normal age-gate video (embed allowed)
  563. {
  564. 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
  565. 'info_dict': {
  566. 'id': 'HtVdAasjOgU',
  567. 'ext': 'mp4',
  568. 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
  569. 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
  570. 'duration': 142,
  571. 'uploader': 'The Witcher',
  572. 'uploader_id': 'WitcherGame',
  573. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
  574. 'upload_date': '20140605',
  575. 'age_limit': 18,
  576. },
  577. },
  578. # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
  579. # YouTube Red ad is not captured for creator
  580. {
  581. 'url': '__2ABJjxzNo',
  582. 'info_dict': {
  583. 'id': '__2ABJjxzNo',
  584. 'ext': 'mp4',
  585. 'duration': 266,
  586. 'upload_date': '20100430',
  587. 'uploader_id': 'deadmau5',
  588. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
  589. 'creator': 'Dada Life, deadmau5',
  590. 'description': 'md5:12c56784b8032162bb936a5f76d55360',
  591. 'uploader': 'deadmau5',
  592. 'title': 'Deadmau5 - Some Chords (HD)',
  593. 'alt_title': 'This Machine Kills Some Chords',
  594. },
  595. 'expected_warnings': [
  596. 'DASH manifest missing',
  597. ]
  598. },
  599. # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
  600. {
  601. 'url': 'lqQg6PlCWgI',
  602. 'info_dict': {
  603. 'id': 'lqQg6PlCWgI',
  604. 'ext': 'mp4',
  605. 'duration': 6085,
  606. 'upload_date': '20150827',
  607. 'uploader_id': 'olympic',
  608. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
  609. 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
  610. 'uploader': 'Olympic',
  611. 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
  612. },
  613. 'params': {
  614. 'skip_download': 'requires avconv',
  615. }
  616. },
  617. # Non-square pixels
  618. {
  619. 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
  620. 'info_dict': {
  621. 'id': '_b-2C3KPAM0',
  622. 'ext': 'mp4',
  623. 'stretched_ratio': 16 / 9.,
  624. 'duration': 85,
  625. 'upload_date': '20110310',
  626. 'uploader_id': 'AllenMeow',
  627. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
  628. 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
  629. 'uploader': '孫ᄋᄅ',
  630. 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
  631. },
  632. },
  633. # url_encoded_fmt_stream_map is empty string
  634. {
  635. 'url': 'qEJwOuvDf7I',
  636. 'info_dict': {
  637. 'id': 'qEJwOuvDf7I',
  638. 'ext': 'webm',
  639. 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
  640. 'description': '',
  641. 'upload_date': '20150404',
  642. 'uploader_id': 'spbelect',
  643. 'uploader': 'Наблюдатели Петербурга',
  644. },
  645. 'params': {
  646. 'skip_download': 'requires avconv',
  647. },
  648. 'skip': 'This live event has ended.',
  649. },
  650. # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
  651. {
  652. 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
  653. 'info_dict': {
  654. 'id': 'FIl7x6_3R5Y',
  655. 'ext': 'webm',
  656. 'title': 'md5:7b81415841e02ecd4313668cde88737a',
  657. 'description': 'md5:116377fd2963b81ec4ce64b542173306',
  658. 'duration': 220,
  659. 'upload_date': '20150625',
  660. 'uploader_id': 'dorappi2000',
  661. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
  662. 'uploader': 'dorappi2000',
  663. 'formats': 'mincount:31',
  664. },
  665. 'skip': 'not actual anymore',
  666. },
  667. # DASH manifest with segment_list
  668. {
  669. 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
  670. 'md5': '8ce563a1d667b599d21064e982ab9e31',
  671. 'info_dict': {
  672. 'id': 'CsmdDsKjzN8',
  673. 'ext': 'mp4',
  674. 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
  675. 'uploader': 'Airtek',
  676. 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
  677. 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
  678. 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
  679. },
  680. 'params': {
  681. 'youtube_include_dash_manifest': True,
  682. 'format': '135', # bestvideo
  683. },
  684. 'skip': 'This live event has ended.',
  685. },
  686. {
  687. # Multifeed videos (multiple cameras), URL is for Main Camera
  688. 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
  689. 'info_dict': {
  690. 'id': 'jqWvoWXjCVs',
  691. 'title': 'teamPGP: Rocket League Noob Stream',
  692. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  693. },
  694. 'playlist': [{
  695. 'info_dict': {
  696. 'id': 'jqWvoWXjCVs',
  697. 'ext': 'mp4',
  698. 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
  699. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  700. 'duration': 7335,
  701. 'upload_date': '20150721',
  702. 'uploader': 'Beer Games Beer',
  703. 'uploader_id': 'beergamesbeer',
  704. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  705. 'license': 'Standard YouTube License',
  706. },
  707. }, {
  708. 'info_dict': {
  709. 'id': '6h8e8xoXJzg',
  710. 'ext': 'mp4',
  711. 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
  712. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  713. 'duration': 7337,
  714. 'upload_date': '20150721',
  715. 'uploader': 'Beer Games Beer',
  716. 'uploader_id': 'beergamesbeer',
  717. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  718. 'license': 'Standard YouTube License',
  719. },
  720. }, {
  721. 'info_dict': {
  722. 'id': 'PUOgX5z9xZw',
  723. 'ext': 'mp4',
  724. 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
  725. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  726. 'duration': 7337,
  727. 'upload_date': '20150721',
  728. 'uploader': 'Beer Games Beer',
  729. 'uploader_id': 'beergamesbeer',
  730. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  731. 'license': 'Standard YouTube License',
  732. },
  733. }, {
  734. 'info_dict': {
  735. 'id': 'teuwxikvS5k',
  736. 'ext': 'mp4',
  737. 'title': 'teamPGP: Rocket League Noob Stream (zim)',
  738. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  739. 'duration': 7334,
  740. 'upload_date': '20150721',
  741. 'uploader': 'Beer Games Beer',
  742. 'uploader_id': 'beergamesbeer',
  743. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  744. 'license': 'Standard YouTube License',
  745. },
  746. }],
  747. 'params': {
  748. 'skip_download': True,
  749. },
  750. 'skip': 'This video is not available.',
  751. },
  752. {
  753. # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
  754. 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
  755. 'info_dict': {
  756. 'id': 'gVfLd0zydlo',
  757. 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
  758. },
  759. 'playlist_count': 2,
  760. 'skip': 'Not multifeed anymore',
  761. },
  762. {
  763. 'url': 'https://vid.plus/FlRa-iH7PGw',
  764. 'only_matching': True,
  765. },
  766. {
  767. 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
  768. 'only_matching': True,
  769. },
  770. {
  771. # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  772. # Also tests cut-off URL expansion in video description (see
  773. # https://github.com/ytdl-org/youtube-dl/issues/1892,
  774. # https://github.com/ytdl-org/youtube-dl/issues/8164)
  775. 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
  776. 'info_dict': {
  777. 'id': 'lsguqyKfVQg',
  778. 'ext': 'mp4',
  779. 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
  780. 'alt_title': 'Dark Walk - Position Music',
  781. 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
  782. 'duration': 133,
  783. 'upload_date': '20151119',
  784. 'uploader_id': 'IronSoulElf',
  785. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
  786. 'uploader': 'IronSoulElf',
  787. 'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
  788. 'track': 'Dark Walk - Position Music',
  789. 'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
  790. 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
  791. },
  792. 'params': {
  793. 'skip_download': True,
  794. },
  795. },
  796. {
  797. # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  798. 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
  799. 'only_matching': True,
  800. },
  801. {
  802. # Video with yt:stretch=17:0
  803. 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
  804. 'info_dict': {
  805. 'id': 'Q39EVAstoRM',
  806. 'ext': 'mp4',
  807. 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
  808. 'description': 'md5:ee18a25c350637c8faff806845bddee9',
  809. 'upload_date': '20151107',
  810. 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
  811. 'uploader': 'CH GAMER DROID',
  812. },
  813. 'params': {
  814. 'skip_download': True,
  815. },
  816. 'skip': 'This video does not exist.',
  817. },
  818. {
  819. # Video licensed under Creative Commons
  820. 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
  821. 'info_dict': {
  822. 'id': 'M4gD1WSo5mA',
  823. 'ext': 'mp4',
  824. 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
  825. 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
  826. 'duration': 721,
  827. 'upload_date': '20150127',
  828. 'uploader_id': 'BerkmanCenter',
  829. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
  830. 'uploader': 'The Berkman Klein Center for Internet & Society',
  831. 'license': 'Creative Commons Attribution license (reuse allowed)',
  832. },
  833. 'params': {
  834. 'skip_download': True,
  835. },
  836. },
  837. {
  838. # Channel-like uploader_url
  839. 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
  840. 'info_dict': {
  841. 'id': 'eQcmzGIKrzg',
  842. 'ext': 'mp4',
  843. 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
  844. 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
  845. 'duration': 4060,
  846. 'upload_date': '20151119',
  847. 'uploader': 'Bernie Sanders',
  848. 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
  849. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
  850. 'license': 'Creative Commons Attribution license (reuse allowed)',
  851. },
  852. 'params': {
  853. 'skip_download': True,
  854. },
  855. },
  856. {
  857. 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
  858. 'only_matching': True,
  859. },
  860. {
  861. # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
  862. 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
  863. 'only_matching': True,
  864. },
  865. {
  866. # Rental video preview
  867. 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
  868. 'info_dict': {
  869. 'id': 'uGpuVWrhIzE',
  870. 'ext': 'mp4',
  871. 'title': 'Piku - Trailer',
  872. 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
  873. 'upload_date': '20150811',
  874. 'uploader': 'FlixMatrix',
  875. 'uploader_id': 'FlixMatrixKaravan',
  876. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
  877. 'license': 'Standard YouTube License',
  878. },
  879. 'params': {
  880. 'skip_download': True,
  881. },
  882. 'skip': 'This video is not available.',
  883. },
  884. {
  885. # YouTube Red video with episode data
  886. 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
  887. 'info_dict': {
  888. 'id': 'iqKdEhx-dD4',
  889. 'ext': 'mp4',
  890. 'title': 'Isolation - Mind Field (Ep 1)',
  891. 'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
  892. 'duration': 2085,
  893. 'upload_date': '20170118',
  894. 'uploader': 'Vsauce',
  895. 'uploader_id': 'Vsauce',
  896. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
  897. 'series': 'Mind Field',
  898. 'season_number': 1,
  899. 'episode_number': 1,
  900. },
  901. 'params': {
  902. 'skip_download': True,
  903. },
  904. 'expected_warnings': [
  905. 'Skipping DASH manifest',
  906. ],
  907. },
  908. {
  909. # The following content has been identified by the YouTube community
  910. # as inappropriate or offensive to some audiences.
  911. 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
  912. 'info_dict': {
  913. 'id': '6SJNVb0GnPI',
  914. 'ext': 'mp4',
  915. 'title': 'Race Differences in Intelligence',
  916. 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
  917. 'duration': 965,
  918. 'upload_date': '20140124',
  919. 'uploader': 'New Century Foundation',
  920. 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
  921. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
  922. },
  923. 'params': {
  924. 'skip_download': True,
  925. },
  926. },
  927. {
  928. # itag 212
  929. 'url': '1t24XAntNCY',
  930. 'only_matching': True,
  931. },
  932. {
  933. # geo restricted to JP
  934. 'url': 'sJL6WA-aGkQ',
  935. 'only_matching': True,
  936. },
  937. {
  938. 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
  939. 'only_matching': True,
  940. },
  941. {
  942. # DRM protected
  943. 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
  944. 'only_matching': True,
  945. },
  946. {
  947. # Video with unsupported adaptive stream type formats
  948. 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
  949. 'info_dict': {
  950. 'id': 'Z4Vy8R84T1U',
  951. 'ext': 'mp4',
  952. 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
  953. 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
  954. 'duration': 433,
  955. 'upload_date': '20130923',
  956. 'uploader': 'Amelia Putri Harwita',
  957. 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
  958. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
  959. 'formats': 'maxcount:10',
  960. },
  961. 'params': {
  962. 'skip_download': True,
  963. 'youtube_include_dash_manifest': False,
  964. },
  965. 'skip': 'not actual anymore',
  966. },
  967. {
  968. # Youtube Music Auto-generated description
  969. 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
  970. 'info_dict': {
  971. 'id': 'MgNrAu2pzNs',
  972. 'ext': 'mp4',
  973. 'title': 'Voyeur Girl',
  974. 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
  975. 'upload_date': '20190312',
  976. 'uploader': 'Stephen - Topic',
  977. 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
  978. 'artist': 'Stephen',
  979. 'track': 'Voyeur Girl',
  980. 'album': 'it\'s too much love to know my dear',
  981. 'release_date': '20190313',
  982. 'release_year': 2019,
  983. },
  984. 'params': {
  985. 'skip_download': True,
  986. },
  987. },
  988. {
  989. 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
  990. 'only_matching': True,
  991. },
  992. {
  993. # invalid -> valid video id redirection
  994. 'url': 'DJztXj2GPfl',
  995. 'info_dict': {
  996. 'id': 'DJztXj2GPfk',
  997. 'ext': 'mp4',
  998. 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
  999. 'description': 'md5:bf577a41da97918e94fa9798d9228825',
  1000. 'upload_date': '20090125',
  1001. 'uploader': 'Prochorowka',
  1002. 'uploader_id': 'Prochorowka',
  1003. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
  1004. 'artist': 'Panjabi MC',
  1005. 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
  1006. 'album': 'Beware of the Boys (Mundian To Bach Ke)',
  1007. },
  1008. 'params': {
  1009. 'skip_download': True,
  1010. },
  1011. },
  1012. {
  1013. # empty description results in an empty string
  1014. 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
  1015. 'info_dict': {
  1016. 'id': 'x41yOUIvK2k',
  1017. 'ext': 'mp4',
  1018. 'title': 'IMG 3456',
  1019. 'description': '',
  1020. 'upload_date': '20170613',
  1021. 'uploader_id': 'ElevageOrVert',
  1022. 'uploader': 'ElevageOrVert',
  1023. },
  1024. 'params': {
  1025. 'skip_download': True,
  1026. },
  1027. },
  1028. {
  1029. # with '};' inside yt initial data (see https://github.com/ytdl-org/youtube-dl/issues/27093)
  1030. 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
  1031. 'info_dict': {
  1032. 'id': 'CHqg6qOn4no',
  1033. 'ext': 'mp4',
  1034. 'title': 'Part 77 Sort a list of simple types in c#',
  1035. 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
  1036. 'upload_date': '20130831',
  1037. 'uploader_id': 'kudvenkat',
  1038. 'uploader': 'kudvenkat',
  1039. },
  1040. 'params': {
  1041. 'skip_download': True,
  1042. },
  1043. },
  1044. ]
  1045. def __init__(self, *args, **kwargs):
  1046. super(YoutubeIE, self).__init__(*args, **kwargs)
  1047. self._player_cache = {}
  1048. def report_video_info_webpage_download(self, video_id):
  1049. """Report attempt to download video info webpage."""
  1050. self.to_screen('%s: Downloading video info webpage' % video_id)
  1051. def report_information_extraction(self, video_id):
  1052. """Report attempt to extract video information."""
  1053. self.to_screen('%s: Extracting video information' % video_id)
  1054. def report_unavailable_format(self, video_id, format):
  1055. """Report extracted video URL."""
  1056. self.to_screen('%s: Format %s not available' % (video_id, format))
  1057. def report_rtmp_download(self):
  1058. """Indicate the download will use the RTMP protocol."""
  1059. self.to_screen('RTMP download detected')
  1060. def _signature_cache_id(self, example_sig):
  1061. """ Return a string representation of a signature """
  1062. return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
  1063. @classmethod
  1064. def _extract_player_info(cls, player_url):
  1065. for player_re in cls._PLAYER_INFO_RE:
  1066. id_m = re.search(player_re, player_url)
  1067. if id_m:
  1068. break
  1069. else:
  1070. raise ExtractorError('Cannot identify player %r' % player_url)
  1071. return id_m.group('ext'), id_m.group('id')
    def _extract_signature_function(self, video_id, player_url, example_sig):
        """Return a callable that deciphers signatures shaped like *example_sig*.

        The routine is extracted from the JS or SWF player at *player_url*.
        Results are memoized on disk, keyed by player and signature layout,
        so subsequent runs can skip the player download entirely.
        """
        player_type, player_id = self._extract_player_info(player_url)

        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        # func_id becomes a cache filename; it must not contain path separators.
        assert os.path.basename(func_id) == func_id
        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            # The cached spec is an index permutation: output char i is taken
            # from input position cache_spec[i].
            return lambda s: ''.join(s[i] for i in cache_spec)

        download_note = (
            'Downloading player %s' % player_url
            if self._downloader.params.get('verbose') else
            'Downloading %s player %s' % (player_type, player_id)
        )
        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            # _extract_player_info only ever yields 'js' or 'swf'.
            assert False, 'Invalid player type %r' % player_type

        # Probe the extracted function with a string of distinct characters to
        # learn the index permutation it applies, then persist that spec.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = res(test_string)
        cache_spec = [ord(c) for c in cache_res]
        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the deciphering function *func*.

        Used with the youtube_print_sig_code option so the generated slice
        expression can be pasted into static signature-handling code.
        """
        def gen_sig_code(idxs):
            # Collapse runs of consecutive indices (step +1 or -1) into slice
            # expressions, yielding single-index accesses for isolated items.
            def _genslice(start, end, step):
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    # Inside a run: keep going while the step is maintained,
                    # otherwise emit the finished slice.
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # A new ascending/descending run begins at prev.
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # Flush the final element or the run still in progress.
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        # Recover the permutation applied by func (same probing technique as
        # _extract_signature_function) and render it as slicing code.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                ' return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
    def _parse_sig_js(self, jscode):
        """Return a deciphering callable extracted from the JS player code.

        The signature routine's name is located with a battery of regexes
        (current patterns first, obsolete ones kept as fallbacks), then the
        function is compiled with JSInterpreter.
        """
        funcname = self._search_regex(
            (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             # Obsolete patterns
             r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
             r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
            jscode, 'Initial JS player signature function name', group='sig')
        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        # JSInterpreter functions take their arguments as a list.
        return lambda s: initial_function([s])
  1161. def _parse_sig_swf(self, file_contents):
  1162. swfi = SWFInterpreter(file_contents)
  1163. TARGET_CLASSNAME = 'SignatureDecipher'
  1164. searched_class = swfi.extract_class(TARGET_CLASSNAME)
  1165. initial_function = swfi.extract_function(searched_class, 'decipher')
  1166. return lambda s: initial_function([s])
  1167. def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
  1168. """Turn the encrypted s field into a working signature"""
  1169. if player_url is None:
  1170. raise ExtractorError('Cannot decrypt signature without player_url')
  1171. if player_url.startswith('//'):
  1172. player_url = 'https:' + player_url
  1173. elif not re.match(r'https?://', player_url):
  1174. player_url = compat_urlparse.urljoin(
  1175. 'https://www.youtube.com', player_url)
  1176. try:
  1177. player_id = (player_url, self._signature_cache_id(s))
  1178. if player_id not in self._player_cache:
  1179. func = self._extract_signature_function(
  1180. video_id, player_url, s
  1181. )
  1182. self._player_cache[player_id] = func
  1183. func = self._player_cache[player_id]
  1184. if self._downloader.params.get('youtube_print_sig_code'):
  1185. self._print_sig_code(func, s)
  1186. return func(s)
  1187. except Exception as e:
  1188. tb = traceback.format_exc()
  1189. raise ExtractorError(
  1190. 'Signature extraction failed: ' + tb, cause=e)
  1191. def _get_subtitles(self, video_id, webpage, has_live_chat_replay):
  1192. try:
  1193. subs_doc = self._download_xml(
  1194. 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
  1195. video_id, note=False)
  1196. except ExtractorError as err:
  1197. self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
  1198. return {}
  1199. sub_lang_list = {}
  1200. for track in subs_doc.findall('track'):
  1201. lang = track.attrib['lang_code']
  1202. if lang in sub_lang_list:
  1203. continue
  1204. sub_formats = []
  1205. for ext in self._SUBTITLE_FORMATS:
  1206. params = compat_urllib_parse_urlencode({
  1207. 'lang': lang,
  1208. 'v': video_id,
  1209. 'fmt': ext,
  1210. 'name': track.attrib['name'].encode('utf-8'),
  1211. })
  1212. sub_formats.append({
  1213. 'url': 'https://www.youtube.com/api/timedtext?' + params,
  1214. 'ext': ext,
  1215. })
  1216. sub_lang_list[lang] = sub_formats
  1217. if has_live_chat_replay:
  1218. sub_lang_list['live_chat'] = [
  1219. {
  1220. 'video_id': video_id,
  1221. 'ext': 'json',
  1222. 'protocol': 'youtube_live_chat_replay',
  1223. },
  1224. ]
  1225. if not sub_lang_list:
  1226. self._downloader.report_warning('video doesn\'t have subtitles')
  1227. return {}
  1228. return sub_lang_list
  1229. def _get_ytplayer_config(self, video_id, webpage):
  1230. patterns = (
  1231. # User data may contain arbitrary character sequences that may affect
  1232. # JSON extraction with regex, e.g. when '};' is contained the second
  1233. # regex won't capture the whole JSON. Yet working around by trying more
  1234. # concrete regex first keeping in mind proper quoted string handling
  1235. # to be implemented in future that will replace this workaround (see
  1236. # https://github.com/ytdl-org/youtube-dl/issues/7468,
  1237. # https://github.com/ytdl-org/youtube-dl/pull/7599)
  1238. r';ytplayer\.config\s*=\s*({.+?});ytplayer',
  1239. r';ytplayer\.config\s*=\s*({.+?});',
  1240. )
  1241. config = self._search_regex(
  1242. patterns, webpage, 'ytplayer.config', default=None)
  1243. if config:
  1244. return self._parse_json(
  1245. uppercase_escape(config), video_id, fatal=False)
    def _get_automatic_captions(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
        argument to speed up the process.

        Returns a dict mapping language codes to caption format lists, or an
        empty dict (with a warning) when no automatic captions are found.
        """
        self.to_screen('%s: Looking for automatic captions' % video_id)
        player_config = self._get_ytplayer_config(video_id, webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if not player_config:
            self._downloader.report_warning(err_msg)
            return {}
        try:
            args = player_config['args']
            caption_url = args.get('ttsurl')
            if caption_url:
                # Oldest scheme: ttsurl + timestamp query the track list API.
                timestamp = args['timestamp']
                # We get the available subtitles
                list_params = compat_urllib_parse_urlencode({
                    'type': 'list',
                    'tlangs': 1,
                    'asrs': 1,
                })
                list_url = caption_url + '&' + list_params
                caption_list = self._download_xml(list_url, video_id)
                # The first <track> is the original (possibly ASR) language.
                original_lang_node = caption_list.find('track')
                if original_lang_node is None:
                    self._downloader.report_warning('Video doesn\'t have automatic captions')
                    return {}
                original_lang = original_lang_node.attrib['lang_code']
                caption_kind = original_lang_node.attrib.get('kind', '')
                sub_lang_list = {}
                # Each <target> is a language the captions can be translated to.
                for lang_node in caption_list.findall('target'):
                    sub_lang = lang_node.attrib['lang_code']
                    sub_formats = []
                    for ext in self._SUBTITLE_FORMATS:
                        params = compat_urllib_parse_urlencode({
                            'lang': original_lang,
                            'tlang': sub_lang,
                            'fmt': ext,
                            'ts': timestamp,
                            'kind': caption_kind,
                        })
                        sub_formats.append({
                            'url': caption_url + '&' + params,
                            'ext': ext,
                        })
                    sub_lang_list[sub_lang] = sub_formats
                return sub_lang_list

            def make_captions(sub_url, sub_langs):
                # Derive per-language caption URLs by rewriting the query of
                # *sub_url* with each target language and subtitle format.
                parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
                caption_qs = compat_parse_qs(parsed_sub_url.query)
                captions = {}
                for sub_lang in sub_langs:
                    sub_formats = []
                    for ext in self._SUBTITLE_FORMATS:
                        caption_qs.update({
                            'tlang': [sub_lang],
                            'fmt': [ext],
                        })
                        sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
                            query=compat_urllib_parse_urlencode(caption_qs, True)))
                        sub_formats.append({
                            'url': sub_url,
                            'ext': ext,
                        })
                    captions[sub_lang] = sub_formats
                return captions

            # New captions format as of 22.06.2017
            player_response = args.get('player_response')
            if player_response and isinstance(player_response, compat_str):
                player_response = self._parse_json(
                    player_response, video_id, fatal=False)
                if player_response:
                    renderer = player_response['captions']['playerCaptionsTracklistRenderer']
                    base_url = renderer['captionTracks'][0]['baseUrl']
                    sub_lang_list = []
                    for lang in renderer['translationLanguages']:
                        lang_code = lang.get('languageCode')
                        if lang_code:
                            sub_lang_list.append(lang_code)
                    return make_captions(base_url, sub_lang_list)
            # Some videos don't provide ttsurl but rather caption_tracks and
            # caption_translation_languages (e.g. 20LmZk1hakA)
            # Not used anymore as of 22.06.2017
            caption_tracks = args['caption_tracks']
            caption_translation_languages = args['caption_translation_languages']
            caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
            sub_lang_list = []
            for lang in caption_translation_languages.split(','):
                lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
                sub_lang = lang_qs.get('lc', [None])[0]
                if sub_lang:
                    sub_lang_list.append(sub_lang)
            return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raised by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, IndexError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
  1343. def _mark_watched(self, video_id, video_info, player_response):
  1344. playback_url = url_or_none(try_get(
  1345. player_response,
  1346. lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
  1347. video_info, lambda x: x['videostats_playback_base_url'][0]))
  1348. if not playback_url:
  1349. return
  1350. parsed_playback_url = compat_urlparse.urlparse(playback_url)
  1351. qs = compat_urlparse.parse_qs(parsed_playback_url.query)
  1352. # cpn generation algorithm is reverse engineered from base.js.
  1353. # In fact it works even with dummy cpn.
  1354. CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
  1355. cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
  1356. qs.update({
  1357. 'ver': ['2'],
  1358. 'cpn': [cpn],
  1359. })
  1360. playback_url = compat_urlparse.urlunparse(
  1361. parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
  1362. self._download_webpage(
  1363. playback_url, video_id, 'Marking watched',
  1364. 'Unable to mark watched', fatal=False)
  1365. @staticmethod
  1366. def _extract_urls(webpage):
  1367. # Embedded YouTube player
  1368. entries = [
  1369. unescapeHTML(mobj.group('url'))
  1370. for mobj in re.finditer(r'''(?x)
  1371. (?:
  1372. <iframe[^>]+?src=|
  1373. data-video-url=|
  1374. <embed[^>]+?src=|
  1375. embedSWF\(?:\s*|
  1376. <object[^>]+data=|
  1377. new\s+SWFObject\(
  1378. )
  1379. (["\'])
  1380. (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
  1381. (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
  1382. \1''', webpage)]
  1383. # lazyYT YouTube embed
  1384. entries.extend(list(map(
  1385. unescapeHTML,
  1386. re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
  1387. # Wordpress "YouTube Video Importer" plugin
  1388. matches = re.findall(r'''(?x)<div[^>]+
  1389. class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
  1390. data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
  1391. entries.extend(m[-1] for m in matches)
  1392. return entries
  1393. @staticmethod
  1394. def _extract_url(webpage):
  1395. urls = YoutubeIE._extract_urls(webpage)
  1396. return urls[0] if urls else None
  1397. @classmethod
  1398. def extract_id(cls, url):
  1399. mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
  1400. if mobj is None:
  1401. raise ExtractorError('Invalid URL: %s' % url)
  1402. video_id = mobj.group(2)
  1403. return video_id
  1404. def _extract_chapters_from_json(self, webpage, video_id, duration):
  1405. if not webpage:
  1406. return
  1407. data = self._extract_yt_initial_data(video_id, webpage)
  1408. if not data or not isinstance(data, dict):
  1409. return
  1410. chapters_list = try_get(
  1411. data,
  1412. lambda x: x['playerOverlays']
  1413. ['playerOverlayRenderer']
  1414. ['decoratedPlayerBarRenderer']
  1415. ['decoratedPlayerBarRenderer']
  1416. ['playerBar']
  1417. ['chapteredPlayerBarRenderer']
  1418. ['chapters'],
  1419. list)
  1420. if not chapters_list:
  1421. return
  1422. def chapter_time(chapter):
  1423. return float_or_none(
  1424. try_get(
  1425. chapter,
  1426. lambda x: x['chapterRenderer']['timeRangeStartMillis'],
  1427. int),
  1428. scale=1000)
  1429. chapters = []
  1430. for next_num, chapter in enumerate(chapters_list, start=1):
  1431. start_time = chapter_time(chapter)
  1432. if start_time is None:
  1433. continue
  1434. end_time = (chapter_time(chapters_list[next_num])
  1435. if next_num < len(chapters_list) else duration)
  1436. if end_time is None:
  1437. continue
  1438. title = try_get(
  1439. chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
  1440. compat_str)
  1441. chapters.append({
  1442. 'start_time': start_time,
  1443. 'end_time': end_time,
  1444. 'title': title,
  1445. })
  1446. return chapters
  1447. @staticmethod
  1448. def _extract_chapters_from_description(description, duration):
  1449. if not description:
  1450. return None
  1451. chapter_lines = re.findall(
  1452. r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
  1453. description)
  1454. if not chapter_lines:
  1455. return None
  1456. chapters = []
  1457. for next_num, (chapter_line, time_point) in enumerate(
  1458. chapter_lines, start=1):
  1459. start_time = parse_duration(time_point)
  1460. if start_time is None:
  1461. continue
  1462. if start_time > duration:
  1463. break
  1464. end_time = (duration if next_num == len(chapter_lines)
  1465. else parse_duration(chapter_lines[next_num][1]))
  1466. if end_time is None:
  1467. continue
  1468. if end_time > duration:
  1469. end_time = duration
  1470. if start_time > end_time:
  1471. break
  1472. chapter_title = re.sub(
  1473. r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
  1474. chapter_title = re.sub(r'\s+', ' ', chapter_title)
  1475. chapters.append({
  1476. 'start_time': start_time,
  1477. 'end_time': end_time,
  1478. 'title': chapter_title,
  1479. })
  1480. return chapters
  1481. def _extract_chapters(self, webpage, description, video_id, duration):
  1482. return (self._extract_chapters_from_json(webpage, video_id, duration)
  1483. or self._extract_chapters_from_description(description, duration))
  1484. def _real_extract(self, url):
  1485. url, smuggled_data = unsmuggle_url(url, {})
  1486. proto = (
  1487. 'http' if self._downloader.params.get('prefer_insecure', False)
  1488. else 'https')
  1489. start_time = None
  1490. end_time = None
  1491. parsed_url = compat_urllib_parse_urlparse(url)
  1492. for component in [parsed_url.fragment, parsed_url.query]:
  1493. query = compat_parse_qs(component)
  1494. if start_time is None and 't' in query:
  1495. start_time = parse_duration(query['t'][0])
  1496. if start_time is None and 'start' in query:
  1497. start_time = parse_duration(query['start'][0])
  1498. if end_time is None and 'end' in query:
  1499. end_time = parse_duration(query['end'][0])
  1500. # Extract original video URL from URL with redirection, like age verification, using next_url parameter
  1501. mobj = re.search(self._NEXT_URL_RE, url)
  1502. if mobj:
  1503. url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
  1504. video_id = self.extract_id(url)
  1505. # Get video webpage
  1506. url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
  1507. video_webpage, urlh = self._download_webpage_handle(url, video_id)
  1508. qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
  1509. video_id = qs.get('v', [None])[0] or video_id
  1510. # Attempt to extract SWF player URL
  1511. mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
  1512. if mobj is not None:
  1513. player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
  1514. else:
  1515. player_url = None
  1516. dash_mpds = []
  1517. def add_dash_mpd(video_info):
  1518. dash_mpd = video_info.get('dashmpd')
  1519. if dash_mpd and dash_mpd[0] not in dash_mpds:
  1520. dash_mpds.append(dash_mpd[0])
  1521. def add_dash_mpd_pr(pl_response):
  1522. dash_mpd = url_or_none(try_get(
  1523. pl_response, lambda x: x['streamingData']['dashManifestUrl'],
  1524. compat_str))
  1525. if dash_mpd and dash_mpd not in dash_mpds:
  1526. dash_mpds.append(dash_mpd)
  1527. is_live = None
  1528. view_count = None
  1529. def extract_view_count(v_info):
  1530. return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
  1531. def extract_player_response(player_response, video_id):
  1532. pl_response = str_or_none(player_response)
  1533. if not pl_response:
  1534. return
  1535. pl_response = self._parse_json(pl_response, video_id, fatal=False)
  1536. if isinstance(pl_response, dict):
  1537. add_dash_mpd_pr(pl_response)
  1538. return pl_response
  1539. def extract_embedded_config(embed_webpage, video_id):
  1540. embedded_config = self._search_regex(
  1541. r'setConfig\(({.*})\);',
  1542. embed_webpage, 'ytInitialData', default=None)
  1543. if embedded_config:
  1544. return embedded_config
  1545. player_response = {}
  1546. # Get video info
  1547. video_info = {}
  1548. embed_webpage = None
  1549. if (self._og_search_property('restrictions:age', video_webpage, default=None) == '18+'
  1550. or re.search(r'player-age-gate-content">', video_webpage) is not None):
  1551. cookie_keys = self._get_cookies('https://www.youtube.com').keys()
  1552. age_gate = True
  1553. # We simulate the access to the video from www.youtube.com/v/{video_id}
  1554. # this can be viewed without login into Youtube
  1555. url = proto + '://www.youtube.com/embed/%s' % video_id
  1556. embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
  1557. ext = extract_embedded_config(embed_webpage, video_id)
  1558. # playabilityStatus = re.search(r'{\\\"status\\\":\\\"(?P<playabilityStatus>[^\"]+)\\\"', ext)
  1559. playable_in_embed = re.search(r'{\\\"playableInEmbed\\\":(?P<playableinEmbed>[^\,]+)', ext)
  1560. if not playable_in_embed:
  1561. self.to_screen('Could not determine whether playabale in embed for video %s' % video_id)
  1562. playable_in_embed = ''
  1563. else:
  1564. playable_in_embed = playable_in_embed.group('playableinEmbed')
  1565. # check if video is only playable on youtube in other words not playable in embed - if so it requires auth (cookies)
  1566. # if re.search(r'player-unavailable">', embed_webpage) is not None:
  1567. if playable_in_embed == 'false':
  1568. '''
  1569. # TODO apply this patch when Support for Python 2.6(!) and above drops
  1570. if ({'VISITOR_INFO1_LIVE', 'HSID', 'SSID', 'SID'} <= cookie_keys
  1571. or {'VISITOR_INFO1_LIVE', '__Secure-3PSID', 'LOGIN_INFO'} <= cookie_keys):
  1572. '''
  1573. if (set(('VISITOR_INFO1_LIVE', 'HSID', 'SSID', 'SID')) <= set(cookie_keys)
  1574. or set(('VISITOR_INFO1_LIVE', '__Secure-3PSID', 'LOGIN_INFO')) <= set(cookie_keys)):
  1575. age_gate = False
  1576. # Try looking directly into the video webpage
  1577. ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
  1578. if ytplayer_config:
  1579. args = ytplayer_config.get("args")
  1580. if args is not None:
  1581. if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
  1582. # Convert to the same format returned by compat_parse_qs
  1583. video_info = dict((k, [v]) for k, v in args.items())
  1584. add_dash_mpd(video_info)
  1585. # Rental video is not rented but preview is available (e.g.
  1586. # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
  1587. # https://github.com/ytdl-org/youtube-dl/issues/10532)
  1588. if not video_info and args.get('ypc_vid'):
  1589. return self.url_result(
  1590. args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
  1591. if args.get('livestream') == '1' or args.get('live_playback') == 1:
  1592. is_live = True
  1593. if not player_response:
  1594. player_response = extract_player_response(args.get('player_response'), video_id)
  1595. elif not player_response:
  1596. player_response = ytplayer_config
  1597. if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
  1598. add_dash_mpd_pr(player_response)
  1599. else:
  1600. raise ExtractorError('Video is age restricted and only playable on Youtube. Requires cookies!', expected=True)
  1601. else:
  1602. data = compat_urllib_parse_urlencode({
  1603. 'video_id': video_id,
  1604. 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
  1605. 'sts': self._search_regex(
  1606. r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
  1607. })
  1608. video_info_url = proto + '://www.youtube.com/get_video_info?' + data
  1609. try:
  1610. video_info_webpage = self._download_webpage(
  1611. video_info_url, video_id,
  1612. note='Refetching age-gated info webpage',
  1613. errnote='unable to download video info webpage')
  1614. except ExtractorError:
  1615. video_info_webpage = None
  1616. if video_info_webpage:
  1617. video_info = compat_parse_qs(video_info_webpage)
  1618. pl_response = video_info.get('player_response', [None])[0]
  1619. player_response = extract_player_response(pl_response, video_id)
  1620. add_dash_mpd(video_info)
  1621. view_count = extract_view_count(video_info)
  1622. else:
  1623. age_gate = False
  1624. # Try looking directly into the video webpage
  1625. ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
  1626. if ytplayer_config:
  1627. args = ytplayer_config.get('args', {})
  1628. if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
  1629. # Convert to the same format returned by compat_parse_qs
  1630. video_info = dict((k, [v]) for k, v in args.items())
  1631. add_dash_mpd(video_info)
  1632. # Rental video is not rented but preview is available (e.g.
  1633. # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
  1634. # https://github.com/ytdl-org/youtube-dl/issues/10532)
  1635. if not video_info and args.get('ypc_vid'):
  1636. return self.url_result(
  1637. args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
  1638. if args.get('livestream') == '1' or args.get('live_playback') == 1:
  1639. is_live = True
  1640. if not player_response:
  1641. player_response = extract_player_response(args.get('player_response'), video_id)
  1642. if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
  1643. add_dash_mpd_pr(player_response)
  1644. if not video_info and not player_response:
  1645. player_response = extract_player_response(
  1646. self._search_regex(
  1647. r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;', video_webpage,
  1648. 'initial player response', default='{}'),
  1649. video_id)
  1650. def extract_unavailable_message():
  1651. messages = []
  1652. for tag, kind in (('h1', 'message'), ('div', 'submessage')):
  1653. msg = self._html_search_regex(
  1654. r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
  1655. video_webpage, 'unavailable %s' % kind, default=None)
  1656. if msg:
  1657. messages.append(msg)
  1658. if messages:
  1659. return '\n'.join(messages)
  1660. if not video_info and not player_response:
  1661. unavailable_message = extract_unavailable_message()
  1662. if not unavailable_message:
  1663. unavailable_message = 'Unable to extract video data'
  1664. raise ExtractorError(
  1665. 'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
  1666. if not isinstance(video_info, dict):
  1667. video_info = {}
  1668. video_details = try_get(
  1669. player_response, lambda x: x['videoDetails'], dict) or {}
  1670. microformat = try_get(
  1671. player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
  1672. video_title = video_info.get('title', [None])[0] or video_details.get('title')
  1673. if not video_title:
  1674. self._downloader.report_warning('Unable to extract video title')
  1675. video_title = '_'
  1676. description_original = video_description = get_element_by_id("eow-description", video_webpage)
  1677. if video_description:
  1678. def replace_url(m):
  1679. redir_url = compat_urlparse.urljoin(url, m.group(1))
  1680. parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
  1681. if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
  1682. qs = compat_parse_qs(parsed_redir_url.query)
  1683. q = qs.get('q')
  1684. if q and q[0]:
  1685. return q[0]
  1686. return redir_url
  1687. description_original = video_description = re.sub(r'''(?x)
  1688. <a\s+
  1689. (?:[a-zA-Z-]+="[^"]*"\s+)*?
  1690. (?:title|href)="([^"]+)"\s+
  1691. (?:[a-zA-Z-]+="[^"]*"\s+)*?
  1692. class="[^"]*"[^>]*>
  1693. [^<]+\.{3}\s*
  1694. </a>
  1695. ''', replace_url, video_description)
  1696. video_description = clean_html(video_description)
  1697. else:
  1698. video_description = video_details.get('shortDescription')
  1699. if video_description is None:
  1700. video_description = self._html_search_meta('description', video_webpage)
  1701. if not smuggled_data.get('force_singlefeed', False):
  1702. if not self._downloader.params.get('noplaylist'):
  1703. multifeed_metadata_list = try_get(
  1704. player_response,
  1705. lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
  1706. compat_str) or try_get(
  1707. video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
  1708. if multifeed_metadata_list:
  1709. entries = []
  1710. feed_ids = []
  1711. for feed in multifeed_metadata_list.split(','):
  1712. # Unquote should take place before split on comma (,) since textual
  1713. # fields may contain comma as well (see
  1714. # https://github.com/ytdl-org/youtube-dl/issues/8536)
  1715. feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
    # First value for `name` in this feed's parsed query dict, but only
    # when it is a string; None otherwise.
    return try_get(feed_data, lambda x: x[name][0], compat_str)
  1718. feed_id = feed_entry('id')
  1719. if not feed_id:
  1720. continue
  1721. feed_title = feed_entry('title')
  1722. title = video_title
  1723. if feed_title:
  1724. title += ' (%s)' % feed_title
  1725. entries.append({
  1726. '_type': 'url_transparent',
  1727. 'ie_key': 'Youtube',
  1728. 'url': smuggle_url(
  1729. '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
  1730. {'force_singlefeed': True}),
  1731. 'title': title,
  1732. })
  1733. feed_ids.append(feed_id)
  1734. self.to_screen(
  1735. 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
  1736. % (', '.join(feed_ids), video_id))
  1737. return self.playlist_result(entries, video_id, video_title, video_description)
  1738. else:
  1739. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  1740. if view_count is None:
  1741. view_count = extract_view_count(video_info)
  1742. if view_count is None and video_details:
  1743. view_count = int_or_none(video_details.get('viewCount'))
  1744. if view_count is None and microformat:
  1745. view_count = int_or_none(microformat.get('viewCount'))
  1746. if is_live is None:
  1747. is_live = bool_or_none(video_details.get('isLive'))
  1748. has_live_chat_replay = False
  1749. if not is_live:
  1750. yt_initial_data = self._get_yt_initial_data(video_id, video_webpage)
  1751. try:
  1752. yt_initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
  1753. has_live_chat_replay = True
  1754. except (KeyError, IndexError, TypeError):
  1755. pass
  1756. # Check for "rental" videos
  1757. if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
  1758. raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
  1759. def _extract_filesize(media_url):
  1760. return int_or_none(self._search_regex(
  1761. r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
  1762. streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
  1763. streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
  1764. if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
  1765. self.report_rtmp_download()
  1766. formats = [{
  1767. 'format_id': '_rtmp',
  1768. 'protocol': 'rtmp',
  1769. 'url': video_info['conn'][0],
  1770. 'player_url': player_url,
  1771. }]
  1772. elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
  1773. encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
  1774. if 'rtmpe%3Dyes' in encoded_url_map:
  1775. raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
  1776. formats = []
  1777. formats_spec = {}
  1778. fmt_list = video_info.get('fmt_list', [''])[0]
  1779. if fmt_list:
  1780. for fmt in fmt_list.split(','):
  1781. spec = fmt.split('/')
  1782. if len(spec) > 1:
  1783. width_height = spec[1].split('x')
  1784. if len(width_height) == 2:
  1785. formats_spec[spec[0]] = {
  1786. 'resolution': spec[1],
  1787. 'width': int_or_none(width_height[0]),
  1788. 'height': int_or_none(width_height[1]),
  1789. }
  1790. for fmt in streaming_formats:
  1791. itag = str_or_none(fmt.get('itag'))
  1792. if not itag:
  1793. continue
  1794. quality = fmt.get('quality')
  1795. quality_label = fmt.get('qualityLabel') or quality
  1796. formats_spec[itag] = {
  1797. 'asr': int_or_none(fmt.get('audioSampleRate')),
  1798. 'filesize': int_or_none(fmt.get('contentLength')),
  1799. 'format_note': quality_label,
  1800. 'fps': int_or_none(fmt.get('fps')),
  1801. 'height': int_or_none(fmt.get('height')),
  1802. # bitrate for itag 43 is always 2147483647
  1803. 'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
  1804. 'width': int_or_none(fmt.get('width')),
  1805. }
  1806. for fmt in streaming_formats:
  1807. if fmt.get('drmFamilies') or fmt.get('drm_families'):
  1808. continue
  1809. url = url_or_none(fmt.get('url'))
  1810. if not url:
  1811. cipher = fmt.get('cipher') or fmt.get('signatureCipher')
  1812. if not cipher:
  1813. continue
  1814. url_data = compat_parse_qs(cipher)
  1815. url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
  1816. if not url:
  1817. continue
  1818. else:
  1819. cipher = None
  1820. url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
  1821. stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
  1822. # Unsupported FORMAT_STREAM_TYPE_OTF
  1823. if stream_type == 3:
  1824. continue
  1825. format_id = fmt.get('itag') or url_data['itag'][0]
  1826. if not format_id:
  1827. continue
  1828. format_id = compat_str(format_id)
  1829. if cipher:
  1830. if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
  1831. ASSETS_RE = (
  1832. r'<script[^>]+\bsrc=("[^"]+")[^>]+\bname=["\']player_ias/base',
  1833. r'"jsUrl"\s*:\s*("[^"]+")',
  1834. r'"assets":.+?"js":\s*("[^"]+")')
  1835. jsplayer_url_json = self._search_regex(
  1836. ASSETS_RE,
  1837. embed_webpage if age_gate else video_webpage,
  1838. 'JS player URL (1)', default=None)
  1839. if not jsplayer_url_json and not age_gate:
  1840. # We need the embed website after all
  1841. if embed_webpage is None:
  1842. embed_url = proto + '://www.youtube.com/embed/%s' % video_id
  1843. embed_webpage = self._download_webpage(
  1844. embed_url, video_id, 'Downloading embed webpage')
  1845. jsplayer_url_json = self._search_regex(
  1846. ASSETS_RE, embed_webpage, 'JS player URL')
  1847. player_url = json.loads(jsplayer_url_json)
  1848. if player_url is None:
  1849. player_url_json = self._search_regex(
  1850. r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
  1851. video_webpage, 'age gate player URL')
  1852. player_url = json.loads(player_url_json)
  1853. if 'sig' in url_data:
  1854. url += '&signature=' + url_data['sig'][0]
  1855. elif 's' in url_data:
  1856. encrypted_sig = url_data['s'][0]
  1857. if self._downloader.params.get('verbose'):
  1858. if player_url is None:
  1859. player_desc = 'unknown'
  1860. else:
  1861. player_type, player_version = self._extract_player_info(player_url)
  1862. player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
  1863. parts_sizes = self._signature_cache_id(encrypted_sig)
  1864. self.to_screen('{%s} signature length %s, %s' %
  1865. (format_id, parts_sizes, player_desc))
  1866. signature = self._decrypt_signature(
  1867. encrypted_sig, video_id, player_url, age_gate)
  1868. sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
  1869. url += '&%s=%s' % (sp, signature)
  1870. if 'ratebypass' not in url:
  1871. url += '&ratebypass=yes'
  1872. dct = {
  1873. 'format_id': format_id,
  1874. 'url': url,
  1875. 'player_url': player_url,
  1876. }
  1877. if format_id in self._formats:
  1878. dct.update(self._formats[format_id])
  1879. if format_id in formats_spec:
  1880. dct.update(formats_spec[format_id])
  1881. # Some itags are not included in DASH manifest thus corresponding formats will
  1882. # lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
  1883. # Trying to extract metadata from url_encoded_fmt_stream_map entry.
  1884. mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
  1885. width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
  1886. if width is None:
  1887. width = int_or_none(fmt.get('width'))
  1888. if height is None:
  1889. height = int_or_none(fmt.get('height'))
  1890. filesize = int_or_none(url_data.get(
  1891. 'clen', [None])[0]) or _extract_filesize(url)
  1892. quality = url_data.get('quality', [None])[0] or fmt.get('quality')
  1893. quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
  1894. tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
  1895. or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
  1896. fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
  1897. more_fields = {
  1898. 'filesize': filesize,
  1899. 'tbr': tbr,
  1900. 'width': width,
  1901. 'height': height,
  1902. 'fps': fps,
  1903. 'format_note': quality_label or quality,
  1904. }
  1905. for key, value in more_fields.items():
  1906. if value:
  1907. dct[key] = value
  1908. type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
  1909. if type_:
  1910. type_split = type_.split(';')
  1911. kind_ext = type_split[0].split('/')
  1912. if len(kind_ext) == 2:
  1913. kind, _ = kind_ext
  1914. dct['ext'] = mimetype2ext(type_split[0])
  1915. if kind in ('audio', 'video'):
  1916. codecs = None
  1917. for mobj in re.finditer(
  1918. r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
  1919. if mobj.group('key') == 'codecs':
  1920. codecs = mobj.group('val')
  1921. break
  1922. if codecs:
  1923. dct.update(parse_codecs(codecs))
  1924. if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
  1925. dct['downloader_options'] = {
  1926. # Youtube throttles chunks >~10M
  1927. 'http_chunk_size': 10485760,
  1928. }
  1929. formats.append(dct)
  1930. else:
  1931. manifest_url = (
  1932. url_or_none(try_get(
  1933. player_response,
  1934. lambda x: x['streamingData']['hlsManifestUrl'],
  1935. compat_str))
  1936. or url_or_none(try_get(
  1937. video_info, lambda x: x['hlsvp'][0], compat_str)))
  1938. if manifest_url:
  1939. formats = []
  1940. m3u8_formats = self._extract_m3u8_formats(
  1941. manifest_url, video_id, 'mp4', fatal=False)
  1942. for a_format in m3u8_formats:
  1943. itag = self._search_regex(
  1944. r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
  1945. if itag:
  1946. a_format['format_id'] = itag
  1947. if itag in self._formats:
  1948. dct = self._formats[itag].copy()
  1949. dct.update(a_format)
  1950. a_format = dct
  1951. a_format['player_url'] = player_url
  1952. # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
  1953. a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
  1954. if self._downloader.params.get('youtube_include_hls_manifest', True):
  1955. formats.append(a_format)
  1956. else:
  1957. error_message = extract_unavailable_message()
  1958. if not error_message:
  1959. reason_list = try_get(
  1960. player_response,
  1961. lambda x: x['playabilityStatus']['errorScreen']['playerErrorMessageRenderer']['subreason']['runs'],
  1962. list) or []
  1963. for reason in reason_list:
  1964. if not isinstance(reason, dict):
  1965. continue
  1966. reason_text = try_get(reason, lambda x: x['text'], compat_str)
  1967. if reason_text:
  1968. if not error_message:
  1969. error_message = ''
  1970. error_message += reason_text
  1971. if error_message:
  1972. error_message = clean_html(error_message)
  1973. if not error_message:
  1974. error_message = clean_html(try_get(
  1975. player_response, lambda x: x['playabilityStatus']['reason'],
  1976. compat_str))
  1977. if not error_message:
  1978. error_message = clean_html(
  1979. try_get(video_info, lambda x: x['reason'][0], compat_str))
  1980. if error_message:
  1981. raise ExtractorError(error_message, expected=True)
  1982. raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
  1983. # uploader
  1984. video_uploader = try_get(
  1985. video_info, lambda x: x['author'][0],
  1986. compat_str) or str_or_none(video_details.get('author'))
  1987. if video_uploader:
  1988. video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
  1989. else:
  1990. self._downloader.report_warning('unable to extract uploader name')
  1991. # uploader_id
  1992. video_uploader_id = None
  1993. video_uploader_url = None
  1994. mobj = re.search(
  1995. r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
  1996. video_webpage)
  1997. if mobj is not None:
  1998. video_uploader_id = mobj.group('uploader_id')
  1999. video_uploader_url = mobj.group('uploader_url')
  2000. else:
  2001. owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
  2002. if owner_profile_url:
  2003. video_uploader_id = self._search_regex(
  2004. r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
  2005. default=None)
  2006. video_uploader_url = owner_profile_url
  2007. channel_id = (
  2008. str_or_none(video_details.get('channelId'))
  2009. or self._html_search_meta(
  2010. 'channelId', video_webpage, 'channel id', default=None)
  2011. or self._search_regex(
  2012. r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
  2013. video_webpage, 'channel id', default=None, group='id'))
  2014. channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
  2015. thumbnails = []
  2016. thumbnails_list = try_get(
  2017. video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
  2018. for t in thumbnails_list:
  2019. if not isinstance(t, dict):
  2020. continue
  2021. thumbnail_url = url_or_none(t.get('url'))
  2022. if not thumbnail_url:
  2023. continue
  2024. thumbnails.append({
  2025. 'url': thumbnail_url,
  2026. 'width': int_or_none(t.get('width')),
  2027. 'height': int_or_none(t.get('height')),
  2028. })
  2029. if not thumbnails:
  2030. video_thumbnail = None
  2031. # We try first to get a high quality image:
  2032. m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
  2033. video_webpage, re.DOTALL)
  2034. if m_thumb is not None:
  2035. video_thumbnail = m_thumb.group(1)
  2036. thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
  2037. if thumbnail_url:
  2038. video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
  2039. if video_thumbnail:
  2040. thumbnails.append({'url': video_thumbnail})
  2041. # upload date
  2042. upload_date = self._html_search_meta(
  2043. 'datePublished', video_webpage, 'upload date', default=None)
  2044. if not upload_date:
  2045. upload_date = self._search_regex(
  2046. [r'(?s)id="eow-date.*?>(.*?)</span>',
  2047. r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
  2048. video_webpage, 'upload date', default=None)
  2049. if not upload_date:
  2050. upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
  2051. upload_date = unified_strdate(upload_date)
  2052. video_license = self._html_search_regex(
  2053. r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
  2054. video_webpage, 'license', default=None)
  2055. m_music = re.search(
  2056. r'''(?x)
  2057. <h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
  2058. <ul[^>]*>\s*
  2059. <li>(?P<title>.+?)
  2060. by (?P<creator>.+?)
  2061. (?:
  2062. \(.+?\)|
  2063. <a[^>]*
  2064. (?:
  2065. \bhref=["\']/red[^>]*>| # drop possible
  2066. >\s*Listen ad-free with YouTube Red # YouTube Red ad
  2067. )
  2068. .*?
  2069. )?</li
  2070. ''',
  2071. video_webpage)
  2072. if m_music:
  2073. video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
  2074. video_creator = clean_html(m_music.group('creator'))
  2075. else:
  2076. video_alt_title = video_creator = None
  2077. def extract_meta(field):
  2078. return self._html_search_regex(
  2079. r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
  2080. video_webpage, field, default=None)
  2081. track = extract_meta('Song')
  2082. artist = extract_meta('Artist')
  2083. album = extract_meta('Album')
  2084. # Youtube Music Auto-generated description
  2085. release_date = release_year = None
  2086. if video_description:
  2087. mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
  2088. if mobj:
  2089. if not track:
  2090. track = mobj.group('track').strip()
  2091. if not artist:
  2092. artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
  2093. if not album:
  2094. album = mobj.group('album'.strip())
  2095. release_year = mobj.group('release_year')
  2096. release_date = mobj.group('release_date')
  2097. if release_date:
  2098. release_date = release_date.replace('-', '')
  2099. if not release_year:
  2100. release_year = int(release_date[:4])
  2101. if release_year:
  2102. release_year = int(release_year)
  2103. yt_initial_data = self._extract_yt_initial_data(video_id, video_webpage)
  2104. contents = try_get(yt_initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
  2105. for content in contents:
  2106. rows = try_get(content, lambda x: x['videoSecondaryInfoRenderer']['metadataRowContainer']['metadataRowContainerRenderer']['rows'], list) or []
  2107. multiple_songs = False
  2108. for row in rows:
  2109. if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
  2110. multiple_songs = True
  2111. break
  2112. for row in rows:
  2113. mrr = row.get('metadataRowRenderer') or {}
  2114. mrr_title = try_get(
  2115. mrr, lambda x: x['title']['simpleText'], compat_str)
  2116. mrr_contents = try_get(
  2117. mrr, lambda x: x['contents'][0], dict) or {}
  2118. mrr_contents_text = try_get(mrr_contents, [lambda x: x['simpleText'], lambda x: x['runs'][0]['text']], compat_str)
  2119. if not (mrr_title and mrr_contents_text):
  2120. continue
  2121. if mrr_title == 'License':
  2122. video_license = mrr_contents_text
  2123. elif not multiple_songs:
  2124. if mrr_title == 'Album':
  2125. album = mrr_contents_text
  2126. elif mrr_title == 'Artist':
  2127. artist = mrr_contents_text
  2128. elif mrr_title == 'Song':
  2129. track = mrr_contents_text
  2130. m_episode = re.search(
  2131. r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
  2132. video_webpage)
  2133. if m_episode:
  2134. series = unescapeHTML(m_episode.group('series'))
  2135. season_number = int(m_episode.group('season'))
  2136. episode_number = int(m_episode.group('episode'))
  2137. else:
  2138. series = season_number = episode_number = None
  2139. m_cat_container = self._search_regex(
  2140. r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
  2141. video_webpage, 'categories', default=None)
  2142. category = None
  2143. if m_cat_container:
  2144. category = self._html_search_regex(
  2145. r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
  2146. default=None)
  2147. if not category:
  2148. category = try_get(
  2149. microformat, lambda x: x['category'], compat_str)
  2150. video_categories = None if category is None else [category]
  2151. video_tags = [
  2152. unescapeHTML(m.group('content'))
  2153. for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
  2154. if not video_tags:
  2155. video_tags = try_get(video_details, lambda x: x['keywords'], list)
  2156. def _extract_count(count_name):
  2157. return str_to_int(self._search_regex(
  2158. (r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>' % re.escape(count_name),
  2159. r'["\']label["\']\s*:\s*["\']([\d,.]+)\s+%ss["\']' % re.escape(count_name)),
  2160. video_webpage, count_name, default=None))
  2161. like_count = _extract_count('like')
  2162. dislike_count = _extract_count('dislike')
  2163. if view_count is None:
  2164. view_count = str_to_int(self._search_regex(
  2165. r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
  2166. 'view count', default=None))
  2167. average_rating = (
  2168. float_or_none(video_details.get('averageRating'))
  2169. or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
  2170. # subtitles
  2171. video_subtitles = self.extract_subtitles(
  2172. video_id, video_webpage, has_live_chat_replay)
  2173. automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
  2174. video_duration = try_get(
  2175. video_info, lambda x: int_or_none(x['length_seconds'][0]))
  2176. if not video_duration:
  2177. video_duration = int_or_none(video_details.get('lengthSeconds'))
  2178. if not video_duration:
  2179. video_duration = parse_duration(self._html_search_meta(
  2180. 'duration', video_webpage, 'video duration'))
  2181. # Get Subscriber Count of channel
  2182. subscriber_count = parse_count(self._search_regex(
  2183. r'"text":"([\d\.]+\w?) subscribers"',
  2184. video_webpage,
  2185. 'subscriber count',
  2186. default=None
  2187. ))
  2188. # annotations
  2189. video_annotations = None
  2190. if self._downloader.params.get('writeannotations', False):
  2191. xsrf_token = self._search_regex(
  2192. r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
  2193. video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
  2194. invideo_url = try_get(
  2195. player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
  2196. if xsrf_token and invideo_url:
  2197. xsrf_field_name = self._search_regex(
  2198. r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
  2199. video_webpage, 'xsrf field name',
  2200. group='xsrf_field_name', default='session_token')
  2201. video_annotations = self._download_webpage(
  2202. self._proto_relative_url(invideo_url),
  2203. video_id, note='Downloading annotations',
  2204. errnote='Unable to download video annotations', fatal=False,
  2205. data=urlencode_postdata({xsrf_field_name: xsrf_token}))
  2206. chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
  2207. # Look for the DASH manifest
  2208. if self._downloader.params.get('youtube_include_dash_manifest', True):
  2209. dash_mpd_fatal = True
  2210. for mpd_url in dash_mpds:
  2211. dash_formats = {}
  2212. try:
  2213. def decrypt_sig(mobj):
  2214. s = mobj.group(1)
  2215. dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
  2216. return '/signature/%s' % dec_s
  2217. mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
  2218. for df in self._extract_mpd_formats(
  2219. mpd_url, video_id, fatal=dash_mpd_fatal,
  2220. formats_dict=self._formats):
  2221. if not df.get('filesize'):
  2222. df['filesize'] = _extract_filesize(df['url'])
  2223. # Do not overwrite DASH format found in some previous DASH manifest
  2224. if df['format_id'] not in dash_formats:
  2225. dash_formats[df['format_id']] = df
  2226. # Additional DASH manifests may end up in HTTP Error 403 therefore
  2227. # allow them to fail without bug report message if we already have
  2228. # some DASH manifest succeeded. This is temporary workaround to reduce
  2229. # burst of bug reports until we figure out the reason and whether it
  2230. # can be fixed at all.
  2231. dash_mpd_fatal = False
  2232. except (ExtractorError, KeyError) as e:
  2233. self.report_warning(
  2234. 'Skipping DASH manifest: %r' % e, video_id)
  2235. if dash_formats:
  2236. # Remove the formats we found through non-DASH, they
  2237. # contain less info and it can be wrong, because we use
  2238. # fixed values (for example the resolution). See
  2239. # https://github.com/ytdl-org/youtube-dl/issues/5774 for an
  2240. # example.
  2241. formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
  2242. formats.extend(dash_formats.values())
  2243. # Check for malformed aspect ratio
  2244. stretched_m = re.search(
  2245. r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
  2246. video_webpage)
  2247. if stretched_m:
  2248. w = float(stretched_m.group('w'))
  2249. h = float(stretched_m.group('h'))
  2250. # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
  2251. # We will only process correct ratios.
  2252. if w > 0 and h > 0:
  2253. ratio = w / h
  2254. for f in formats:
  2255. if f.get('vcodec') != 'none':
  2256. f['stretched_ratio'] = ratio
  2257. if not formats:
  2258. if 'reason' in video_info:
  2259. if 'The uploader has not made this video available in your country.' in video_info['reason']:
  2260. regions_allowed = self._html_search_meta(
  2261. 'regionsAllowed', video_webpage, default=None)
  2262. countries = regions_allowed.split(',') if regions_allowed else None
  2263. self.raise_geo_restricted(
  2264. msg=video_info['reason'][0], countries=countries)
  2265. reason = video_info['reason'][0]
  2266. if 'Invalid parameters' in reason:
  2267. unavailable_message = extract_unavailable_message()
  2268. if unavailable_message:
  2269. reason = unavailable_message
  2270. raise ExtractorError(
  2271. 'YouTube said: %s' % reason,
  2272. expected=True, video_id=video_id)
  2273. if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']):
  2274. raise ExtractorError('This video is DRM protected.', expected=True)
  2275. self._sort_formats(formats)
  2276. self.mark_watched(video_id, video_info, player_response)
  2277. return {
  2278. 'id': video_id,
  2279. 'uploader': video_uploader,
  2280. 'uploader_id': video_uploader_id,
  2281. 'uploader_url': video_uploader_url,
  2282. 'channel_id': channel_id,
  2283. 'channel_url': channel_url,
  2284. 'upload_date': upload_date,
  2285. 'license': video_license,
  2286. 'creator': video_creator or artist,
  2287. 'title': video_title,
  2288. 'alt_title': video_alt_title or track,
  2289. 'thumbnails': thumbnails,
  2290. 'description': video_description,
  2291. 'categories': video_categories,
  2292. 'tags': video_tags,
  2293. 'subtitles': video_subtitles,
  2294. 'automatic_captions': automatic_captions,
  2295. 'duration': video_duration,
  2296. 'age_limit': 18 if age_gate else 0,
  2297. 'annotations': video_annotations,
  2298. 'chapters': chapters,
  2299. 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
  2300. 'view_count': view_count,
  2301. 'like_count': like_count,
  2302. 'dislike_count': dislike_count,
  2303. 'average_rating': average_rating,
  2304. 'formats': formats,
  2305. 'is_live': is_live,
  2306. 'start_time': start_time,
  2307. 'end_time': end_time,
  2308. 'series': series,
  2309. 'season_number': season_number,
  2310. 'episode_number': episode_number,
  2311. 'track': track,
  2312. 'artist': artist,
  2313. 'album': album,
  2314. 'release_date': release_date,
  2315. 'release_year': release_year,
  2316. 'subscriber_count': subscriber_count,
  2317. }
class YoutubeTabIE(YoutubeBaseInfoExtractor):
    """Extractor for YouTube "tab" pages: channels, users, playlists and feeds."""
    IE_DESC = 'YouTube.com tab'
    # Matches channel/c/user paths, playlist/watch URLs carrying a list= id,
    # and feed pages, but rejects reserved top-level names (_RESERVED_NAMES)
    # so that e.g. plain /watch URLs stay with other extractors.
    _VALID_URL = r'''(?x)
                    https?://
                        (?:\w+\.)?
                        (?:
                            youtube(?:kids)?\.com|
                            invidio\.us
                        )/
                        (?:
                            (?:channel|c|user)/|
                            (?P<not_channel>
                                feed/|
                                (?:playlist|watch)\?.*?\blist=
                            )|
                            (?!(%s)([/#?]|$))  # Direct URLs
                        )
                        (?P<id>[^/?\#&]+)
                    ''' % YoutubeBaseInfoExtractor._RESERVED_NAMES
    IE_NAME = 'youtube:tab'
  2338. _TESTS = [{
  2339. # playlists, multipage
  2340. 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
  2341. 'playlist_mincount': 94,
  2342. 'info_dict': {
  2343. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2344. 'title': 'Игорь Клейнер - Playlists',
  2345. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2346. },
  2347. }, {
  2348. # playlists, multipage, different order
  2349. 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
  2350. 'playlist_mincount': 94,
  2351. 'info_dict': {
  2352. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2353. 'title': 'Игорь Клейнер - Playlists',
  2354. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2355. },
  2356. }, {
  2357. # playlists, singlepage
  2358. 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
  2359. 'playlist_mincount': 4,
  2360. 'info_dict': {
  2361. 'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
  2362. 'title': 'ThirstForScience - Playlists',
  2363. 'description': 'md5:609399d937ea957b0f53cbffb747a14c',
  2364. }
  2365. }, {
  2366. 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
  2367. 'only_matching': True,
  2368. }, {
  2369. # basic, single video playlist
  2370. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2371. 'info_dict': {
  2372. 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2373. 'uploader': 'Sergey M.',
  2374. 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2375. 'title': 'youtube-dl public playlist',
  2376. },
  2377. 'playlist_count': 1,
  2378. }, {
  2379. # empty playlist
  2380. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2381. 'info_dict': {
  2382. 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2383. 'uploader': 'Sergey M.',
  2384. 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2385. 'title': 'youtube-dl empty playlist',
  2386. },
  2387. 'playlist_count': 0,
  2388. }, {
  2389. # Home tab
  2390. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
  2391. 'info_dict': {
  2392. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2393. 'title': 'lex will - Home',
  2394. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2395. },
  2396. 'playlist_mincount': 2,
  2397. }, {
  2398. # Videos tab
  2399. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
  2400. 'info_dict': {
  2401. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2402. 'title': 'lex will - Videos',
  2403. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2404. },
  2405. 'playlist_mincount': 975,
  2406. }, {
  2407. # Videos tab, sorted by popular
  2408. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
  2409. 'info_dict': {
  2410. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2411. 'title': 'lex will - Videos',
  2412. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2413. },
  2414. 'playlist_mincount': 199,
  2415. }, {
  2416. # Playlists tab
  2417. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
  2418. 'info_dict': {
  2419. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2420. 'title': 'lex will - Playlists',
  2421. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2422. },
  2423. 'playlist_mincount': 17,
  2424. }, {
  2425. # Community tab
  2426. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
  2427. 'info_dict': {
  2428. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2429. 'title': 'lex will - Community',
  2430. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2431. },
  2432. 'playlist_mincount': 18,
  2433. }, {
  2434. # Channels tab
  2435. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
  2436. 'info_dict': {
  2437. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2438. 'title': 'lex will - Channels',
  2439. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2440. },
  2441. 'playlist_mincount': 138,
  2442. }, {
  2443. 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2444. 'only_matching': True,
  2445. }, {
  2446. 'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2447. 'only_matching': True,
  2448. }, {
  2449. 'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2450. 'only_matching': True,
  2451. }, {
  2452. 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
  2453. 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2454. 'info_dict': {
  2455. 'title': '29C3: Not my department',
  2456. 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2457. 'uploader': 'Christiaan008',
  2458. 'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
  2459. },
  2460. 'playlist_count': 96,
  2461. }, {
  2462. 'note': 'Large playlist',
  2463. 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
  2464. 'info_dict': {
  2465. 'title': 'Uploads from Cauchemar',
  2466. 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
  2467. 'uploader': 'Cauchemar',
  2468. 'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
  2469. },
  2470. 'playlist_mincount': 1123,
  2471. }, {
  2472. # even larger playlist, 8832 videos
  2473. 'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
  2474. 'only_matching': True,
  2475. }, {
  2476. 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
  2477. 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
  2478. 'info_dict': {
  2479. 'title': 'Uploads from Interstellar Movie',
  2480. 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
  2481. 'uploader': 'Interstellar Movie',
  2482. 'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
  2483. },
  2484. 'playlist_mincount': 21,
  2485. }, {
  2486. # https://github.com/ytdl-org/youtube-dl/issues/21844
  2487. 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2488. 'info_dict': {
  2489. 'title': 'Data Analysis with Dr Mike Pound',
  2490. 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2491. 'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
  2492. 'uploader': 'Computerphile',
  2493. },
  2494. 'playlist_mincount': 11,
  2495. }, {
  2496. 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2497. 'only_matching': True,
  2498. }, {
  2499. # Playlist URL that does not actually serve a playlist
  2500. 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
  2501. 'info_dict': {
  2502. 'id': 'FqZTN594JQw',
  2503. 'ext': 'webm',
  2504. 'title': "Smiley's People 01 detective, Adventure Series, Action",
  2505. 'uploader': 'STREEM',
  2506. 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
  2507. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
  2508. 'upload_date': '20150526',
  2509. 'license': 'Standard YouTube License',
  2510. 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
  2511. 'categories': ['People & Blogs'],
  2512. 'tags': list,
  2513. 'view_count': int,
  2514. 'like_count': int,
  2515. 'dislike_count': int,
  2516. },
  2517. 'params': {
  2518. 'skip_download': True,
  2519. },
  2520. 'skip': 'This video is not available.',
  2521. 'add_ie': [YoutubeIE.ie_key()],
  2522. }, {
  2523. 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
  2524. 'only_matching': True,
  2525. }, {
  2526. 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
  2527. 'only_matching': True,
  2528. }, {
  2529. 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
  2530. 'info_dict': {
  2531. 'id': '9Auq9mYxFEE',
  2532. 'ext': 'mp4',
  2533. 'title': 'Watch Sky News live',
  2534. 'uploader': 'Sky News',
  2535. 'uploader_id': 'skynews',
  2536. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
  2537. 'upload_date': '20191102',
  2538. 'description': 'md5:78de4e1c2359d0ea3ed829678e38b662',
  2539. 'categories': ['News & Politics'],
  2540. 'tags': list,
  2541. 'like_count': int,
  2542. 'dislike_count': int,
  2543. },
  2544. 'params': {
  2545. 'skip_download': True,
  2546. },
  2547. }, {
  2548. 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
  2549. 'info_dict': {
  2550. 'id': 'a48o2S1cPoo',
  2551. 'ext': 'mp4',
  2552. 'title': 'The Young Turks - Live Main Show',
  2553. 'uploader': 'The Young Turks',
  2554. 'uploader_id': 'TheYoungTurks',
  2555. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
  2556. 'upload_date': '20150715',
  2557. 'license': 'Standard YouTube License',
  2558. 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
  2559. 'categories': ['News & Politics'],
  2560. 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
  2561. 'like_count': int,
  2562. 'dislike_count': int,
  2563. },
  2564. 'params': {
  2565. 'skip_download': True,
  2566. },
  2567. 'only_matching': True,
  2568. }, {
  2569. 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
  2570. 'only_matching': True,
  2571. }, {
  2572. 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
  2573. 'only_matching': True,
  2574. }, {
  2575. 'url': 'https://www.youtube.com/feed/trending',
  2576. 'only_matching': True,
  2577. }, {
  2578. # needs auth
  2579. 'url': 'https://www.youtube.com/feed/library',
  2580. 'only_matching': True,
  2581. }, {
  2582. # needs auth
  2583. 'url': 'https://www.youtube.com/feed/history',
  2584. 'only_matching': True,
  2585. }, {
  2586. # needs auth
  2587. 'url': 'https://www.youtube.com/feed/subscriptions',
  2588. 'only_matching': True,
  2589. }, {
  2590. # needs auth
  2591. 'url': 'https://www.youtube.com/feed/watch_later',
  2592. 'only_matching': True,
  2593. }, {
  2594. # no longer available?
  2595. 'url': 'https://www.youtube.com/feed/recommended',
  2596. 'only_matching': True,
  2597. }
  2598. # TODO
  2599. # {
  2600. # 'url': 'https://www.youtube.com/TheYoungTurks/live',
  2601. # 'only_matching': True,
  2602. # }
  2603. ]
  2604. def _extract_channel_id(self, webpage):
  2605. channel_id = self._html_search_meta(
  2606. 'channelId', webpage, 'channel id', default=None)
  2607. if channel_id:
  2608. return channel_id
  2609. channel_url = self._html_search_meta(
  2610. ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
  2611. 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
  2612. 'twitter:app:url:googleplay'), webpage, 'channel url')
  2613. return self._search_regex(
  2614. r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
  2615. channel_url, 'channel id')
  2616. @staticmethod
  2617. def _extract_grid_item_renderer(item):
  2618. for item_kind in ('Playlist', 'Video', 'Channel'):
  2619. renderer = item.get('grid%sRenderer' % item_kind)
  2620. if renderer:
  2621. return renderer
  2622. def _extract_video(self, renderer):
  2623. video_id = renderer.get('videoId')
  2624. title = try_get(
  2625. renderer,
  2626. (lambda x: x['title']['runs'][0]['text'],
  2627. lambda x: x['title']['simpleText']), compat_str)
  2628. description = try_get(
  2629. renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
  2630. compat_str)
  2631. duration = parse_duration(try_get(
  2632. renderer, lambda x: x['lengthText']['simpleText'], compat_str))
  2633. view_count_text = try_get(
  2634. renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
  2635. view_count = str_to_int(self._search_regex(
  2636. r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
  2637. 'view count', default=None))
  2638. uploader = try_get(
  2639. renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
  2640. return {
  2641. '_type': 'url_transparent',
  2642. 'ie_key': YoutubeIE.ie_key(),
  2643. 'id': video_id,
  2644. 'url': video_id,
  2645. 'title': title,
  2646. 'description': description,
  2647. 'duration': duration,
  2648. 'view_count': view_count,
  2649. 'uploader': uploader,
  2650. }
  2651. def _grid_entries(self, grid_renderer):
  2652. for item in grid_renderer['items']:
  2653. if not isinstance(item, dict):
  2654. continue
  2655. renderer = self._extract_grid_item_renderer(item)
  2656. if not isinstance(renderer, dict):
  2657. continue
  2658. title = try_get(
  2659. renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  2660. # playlist
  2661. playlist_id = renderer.get('playlistId')
  2662. if playlist_id:
  2663. yield self.url_result(
  2664. 'https://www.youtube.com/playlist?list=%s' % playlist_id,
  2665. ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
  2666. video_title=title)
  2667. # video
  2668. video_id = renderer.get('videoId')
  2669. if video_id:
  2670. yield self._extract_video(renderer)
  2671. # channel
  2672. channel_id = renderer.get('channelId')
  2673. if channel_id:
  2674. title = try_get(
  2675. renderer, lambda x: x['title']['simpleText'], compat_str)
  2676. yield self.url_result(
  2677. 'https://www.youtube.com/channel/%s' % channel_id,
  2678. ie=YoutubeTabIE.ie_key(), video_title=title)
  2679. def _shelf_entries_from_content(self, shelf_renderer):
  2680. content = shelf_renderer.get('content')
  2681. if not isinstance(content, dict):
  2682. return
  2683. renderer = content.get('gridRenderer')
  2684. if renderer:
  2685. # TODO: add support for nested playlists so each shelf is processed
  2686. # as separate playlist
  2687. # TODO: this includes only first N items
  2688. for entry in self._grid_entries(renderer):
  2689. yield entry
  2690. renderer = content.get('horizontalListRenderer')
  2691. if renderer:
  2692. # TODO
  2693. pass
  2694. def _shelf_entries(self, shelf_renderer):
  2695. ep = try_get(
  2696. shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
  2697. compat_str)
  2698. shelf_url = urljoin('https://www.youtube.com', ep)
  2699. if shelf_url:
  2700. title = try_get(
  2701. shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  2702. yield self.url_result(shelf_url, video_title=title)
  2703. # Shelf may not contain shelf URL, fallback to extraction from content
  2704. for entry in self._shelf_entries_from_content(shelf_renderer):
  2705. yield entry
  2706. def _playlist_entries(self, video_list_renderer):
  2707. for content in video_list_renderer['contents']:
  2708. if not isinstance(content, dict):
  2709. continue
  2710. renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
  2711. if not isinstance(renderer, dict):
  2712. continue
  2713. video_id = renderer.get('videoId')
  2714. if not video_id:
  2715. continue
  2716. yield self._extract_video(renderer)
  2717. r""" # Not needed in the new implementation
  2718. def _itemSection_entries(self, item_sect_renderer):
  2719. for content in item_sect_renderer['contents']:
  2720. if not isinstance(content, dict):
  2721. continue
  2722. renderer = content.get('videoRenderer', {})
  2723. if not isinstance(renderer, dict):
  2724. continue
  2725. video_id = renderer.get('videoId')
  2726. if not video_id:
  2727. continue
  2728. yield self._extract_video(renderer)
  2729. """
  2730. def _rich_entries(self, rich_grid_renderer):
  2731. renderer = try_get(
  2732. rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
  2733. video_id = renderer.get('videoId')
  2734. if not video_id:
  2735. return
  2736. yield self._extract_video(renderer)
  2737. def _video_entry(self, video_renderer):
  2738. video_id = video_renderer.get('videoId')
  2739. if video_id:
  2740. return self._extract_video(video_renderer)
  2741. def _post_thread_entries(self, post_thread_renderer):
  2742. post_renderer = try_get(
  2743. post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
  2744. if not post_renderer:
  2745. return
  2746. # video attachment
  2747. video_renderer = try_get(
  2748. post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
  2749. video_id = None
  2750. if video_renderer:
  2751. entry = self._video_entry(video_renderer)
  2752. if entry:
  2753. yield entry
  2754. # inline video links
  2755. runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
  2756. for run in runs:
  2757. if not isinstance(run, dict):
  2758. continue
  2759. ep_url = try_get(
  2760. run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
  2761. if not ep_url:
  2762. continue
  2763. if not YoutubeIE.suitable(ep_url):
  2764. continue
  2765. ep_video_id = YoutubeIE._match_id(ep_url)
  2766. if video_id == ep_video_id:
  2767. continue
  2768. yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
  2769. def _post_thread_continuation_entries(self, post_thread_continuation):
  2770. contents = post_thread_continuation.get('contents')
  2771. if not isinstance(contents, list):
  2772. return
  2773. for content in contents:
  2774. renderer = content.get('backstagePostThreadRenderer')
  2775. if not isinstance(renderer, dict):
  2776. continue
  2777. for entry in self._post_thread_entries(renderer):
  2778. yield entry
  2779. @staticmethod
  2780. def _extract_next_continuation_data(renderer):
  2781. next_continuation = try_get(
  2782. renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
  2783. if not next_continuation:
  2784. return
  2785. continuation = next_continuation.get('continuation')
  2786. if not continuation:
  2787. return
  2788. ctp = next_continuation.get('clickTrackingParams')
  2789. return {
  2790. 'ctoken': continuation,
  2791. 'continuation': continuation,
  2792. 'itct': ctp,
  2793. }
  2794. @classmethod
  2795. def _extract_continuation(cls, renderer):
  2796. next_continuation = cls._extract_next_continuation_data(renderer)
  2797. if next_continuation:
  2798. return next_continuation
  2799. contents = renderer.get('contents')
  2800. if not isinstance(contents, list):
  2801. return
  2802. for content in contents:
  2803. if not isinstance(content, dict):
  2804. continue
  2805. continuation_ep = try_get(
  2806. content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
  2807. dict)
  2808. if not continuation_ep:
  2809. continue
  2810. continuation = try_get(
  2811. continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
  2812. if not continuation:
  2813. continue
  2814. ctp = continuation_ep.get('clickTrackingParams')
  2815. if not ctp:
  2816. continue
  2817. return {
  2818. 'ctoken': continuation,
  2819. 'continuation': continuation,
  2820. 'itct': ctp,
  2821. }
def _entries(self, tab, identity_token):
    """Yield every entry of the selected tab, following continuations.

    First walks the renderer tree of the initial tab content, then keeps
    requesting /browse_ajax continuation pages until no continuation
    token remains.

    :param tab: content dict of the selected tabRenderer
    :param identity_token: value for the x-youtube-identity-token header
        (required for account-specific feeds); may be None
    """
    def extract_entries(parent_renderer):  # this needs to be called again for continuation to work with feeds
        contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
        for content in contents:
            if not isinstance(content, dict):
                continue
            is_renderer = try_get(content, lambda x: x['itemSectionRenderer'], dict)
            if not is_renderer:
                # Rich grid item (e.g. home feed): yield it, then record the
                # parent's continuation for the next page.
                renderer = content.get('richItemRenderer')
                if renderer:
                    for entry in self._rich_entries(renderer):
                        yield entry
                    continuation_list[0] = self._extract_continuation(parent_renderer)
                continue
            # itemSectionRenderer: dispatch on the first recognized
            # renderer kind of each section entry.
            isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
            for isr_content in isr_contents:
                if not isinstance(isr_content, dict):
                    continue
                renderer = isr_content.get('playlistVideoListRenderer')
                if renderer:
                    for entry in self._playlist_entries(renderer):
                        yield entry
                    continuation_list[0] = self._extract_continuation(renderer)
                    continue
                renderer = isr_content.get('gridRenderer')
                if renderer:
                    for entry in self._grid_entries(renderer):
                        yield entry
                    continuation_list[0] = self._extract_continuation(renderer)
                    continue
                renderer = isr_content.get('shelfRenderer')
                if renderer:
                    for entry in self._shelf_entries(renderer):
                        yield entry
                    continue
                renderer = isr_content.get('backstagePostThreadRenderer')
                if renderer:
                    for entry in self._post_thread_entries(renderer):
                        yield entry
                    continuation_list[0] = self._extract_continuation(renderer)
                    continue
                renderer = isr_content.get('videoRenderer')
                if renderer:
                    entry = self._video_entry(renderer)
                    if entry:
                        yield entry
            # Fall back to the section's (then the parent's) continuation
            # if no inner renderer provided one.
            if not continuation_list[0]:
                continuation_list[0] = self._extract_continuation(is_renderer)
        if not continuation_list[0]:
            continuation_list[0] = self._extract_continuation(parent_renderer)
    # Single-element list used as a mutable cell because Python 2
    # does not support the nonlocal statement.
    continuation_list = [None]
    parent_renderer = (
        try_get(tab, lambda x: x['sectionListRenderer'], dict)
        or try_get(tab, lambda x: x['richGridRenderer'], dict) or {})
    for entry in extract_entries(parent_renderer):
        yield entry
    continuation = continuation_list[0]
    headers = {
        'x-youtube-client-name': '1',
        'x-youtube-client-version': '2.20201112.04.01',
    }
    if identity_token:
        headers['x-youtube-identity-token'] = identity_token
    # Follow continuations until the service stops returning them.
    for page_num in itertools.count(1):
        if not continuation:
            break
        browse = self._download_json(
            'https://www.youtube.com/browse_ajax', None,
            'Downloading page %d' % page_num,
            headers=headers, query=continuation, fatal=False)
        if not browse:
            break
        response = try_get(browse, lambda x: x[1]['response'], dict)
        if not response:
            break
        # Legacy continuation shape: continuationContents with a
        # kind-specific "...Continuation" renderer.
        continuation_contents = try_get(
            response, lambda x: x['continuationContents'], dict)
        if continuation_contents:
            continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
            if continuation_renderer:
                for entry in self._playlist_entries(continuation_renderer):
                    yield entry
                continuation = self._extract_continuation(continuation_renderer)
                continue
            continuation_renderer = continuation_contents.get('gridContinuation')
            if continuation_renderer:
                for entry in self._grid_entries(continuation_renderer):
                    yield entry
                continuation = self._extract_continuation(continuation_renderer)
                continue
            continuation_renderer = continuation_contents.get('itemSectionContinuation')
            if continuation_renderer:
                for entry in self._post_thread_continuation_entries(continuation_renderer):
                    yield entry
                continuation = self._extract_continuation(continuation_renderer)
                continue
            continuation_renderer = continuation_contents.get('sectionListContinuation')  # for feeds
            if continuation_renderer:
                # Feeds reuse extract_entries; reset the continuation cell
                # so the closure can record the next token.
                continuation_list = [None]
                for entry in extract_entries(continuation_renderer):
                    yield entry
                continuation = continuation_list[0]
                continue
        # Newer continuation shape: appendContinuationItemsAction.
        continuation_items = try_get(
            response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
        if continuation_items:
            continuation_item = continuation_items[0]
            if not isinstance(continuation_item, dict):
                continue
            renderer = continuation_item.get('playlistVideoRenderer') or continuation_item.get('itemSectionRenderer')
            if renderer:
                # Wrap the raw item list so the playlist helper can walk it.
                video_list_renderer = {'contents': continuation_items}
                for entry in self._playlist_entries(video_list_renderer):
                    yield entry
                continuation = self._extract_continuation(video_list_renderer)
                continue
        break
  2939. @staticmethod
  2940. def _extract_selected_tab(tabs):
  2941. for tab in tabs:
  2942. if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
  2943. return tab['tabRenderer']
  2944. else:
  2945. raise ExtractorError('Unable to find selected tab')
  2946. @staticmethod
  2947. def _extract_uploader(data):
  2948. uploader = {}
  2949. sidebar_renderer = try_get(
  2950. data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
  2951. if sidebar_renderer:
  2952. for item in sidebar_renderer:
  2953. if not isinstance(item, dict):
  2954. continue
  2955. renderer = item.get('playlistSidebarSecondaryInfoRenderer')
  2956. if not isinstance(renderer, dict):
  2957. continue
  2958. owner = try_get(
  2959. renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
  2960. if owner:
  2961. uploader['uploader'] = owner.get('text')
  2962. uploader['uploader_id'] = try_get(
  2963. owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
  2964. uploader['uploader_url'] = urljoin(
  2965. 'https://www.youtube.com/',
  2966. try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
  2967. return uploader
  2968. def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
  2969. selected_tab = self._extract_selected_tab(tabs)
  2970. renderer = try_get(
  2971. data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
  2972. playlist_id = title = description = None
  2973. if renderer:
  2974. channel_title = renderer.get('title') or item_id
  2975. tab_title = selected_tab.get('title')
  2976. title = channel_title or item_id
  2977. if tab_title:
  2978. title += ' - %s' % tab_title
  2979. description = renderer.get('description')
  2980. playlist_id = renderer.get('externalId')
  2981. renderer = try_get(
  2982. data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
  2983. if renderer:
  2984. title = renderer.get('title')
  2985. description = None
  2986. playlist_id = item_id
  2987. if playlist_id is None:
  2988. playlist_id = item_id
  2989. if title is None:
  2990. title = "Youtube " + playlist_id.title()
  2991. playlist = self.playlist_result(
  2992. self._entries(selected_tab['content'], identity_token),
  2993. playlist_id=playlist_id, playlist_title=title,
  2994. playlist_description=description)
  2995. playlist.update(self._extract_uploader(data))
  2996. return playlist
  2997. def _extract_from_playlist(self, item_id, data, playlist):
  2998. title = playlist.get('title') or try_get(
  2999. data, lambda x: x['titleText']['simpleText'], compat_str)
  3000. playlist_id = playlist.get('playlistId') or item_id
  3001. return self.playlist_result(
  3002. self._playlist_entries(playlist), playlist_id=playlist_id,
  3003. playlist_title=title)
  3004. def _extract_alerts(self, data):
  3005. for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
  3006. for renderer in alert_dict:
  3007. alert = alert_dict[renderer]
  3008. alert_type = alert.get('type')
  3009. if not alert_type:
  3010. continue
  3011. message = try_get(alert, lambda x: x['text']['simpleText'], compat_str)
  3012. if message:
  3013. yield alert_type, message
  3014. for run in try_get(alert, lambda x: x['text']['runs'], list) or []:
  3015. message = try_get(run, lambda x: x['text'], compat_str)
  3016. if message:
  3017. yield alert_type, message
  3018. def _real_extract(self, url):
  3019. item_id = self._match_id(url)
  3020. url = compat_urlparse.urlunparse(
  3021. compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
  3022. is_home = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
  3023. if is_home is not None and is_home.group('not_channel') is None and item_id != 'feed':
  3024. self._downloader.report_warning(
  3025. 'A channel/user page was given. All the channel\'s videos will be downloaded. '
  3026. 'To download only the videos in the home page, add a "/home" to the URL')
  3027. url = '%s/videos%s' % (is_home.group('pre'), is_home.group('post') or '')
  3028. # Handle both video/playlist URLs
  3029. qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
  3030. video_id = qs.get('v', [None])[0]
  3031. playlist_id = qs.get('list', [None])[0]
  3032. if is_home.group('not_channel') is not None and is_home.group('not_channel').startswith('watch') and not video_id:
  3033. if playlist_id:
  3034. self._downloader.report_warning('%s is not a valid Youtube URL. Trying to download playlist %s' % (url, playlist_id))
  3035. url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
  3036. # return self.url_result(playlist_id, ie=YoutubePlaylistIE.ie_key())
  3037. else:
  3038. raise ExtractorError('Unable to recognize tab page')
  3039. if video_id and playlist_id:
  3040. if self._downloader.params.get('noplaylist'):
  3041. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  3042. return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
  3043. self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
  3044. webpage = self._download_webpage(url, item_id)
  3045. identity_token = self._search_regex(
  3046. r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
  3047. 'identity token', default=None)
  3048. data = self._extract_yt_initial_data(item_id, webpage)
  3049. for alert_type, alert_message in self._extract_alerts(data):
  3050. self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
  3051. tabs = try_get(
  3052. data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
  3053. if tabs:
  3054. return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
  3055. playlist = try_get(
  3056. data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
  3057. if playlist:
  3058. return self._extract_from_playlist(item_id, data, playlist)
  3059. # Fallback to video extraction if no playlist alike page is recognized.
  3060. # First check for the current video then try the v attribute of URL query.
  3061. video_id = try_get(
  3062. data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
  3063. compat_str) or video_id
  3064. if video_id:
  3065. return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
  3066. # Failed to recognize
  3067. raise ExtractorError('Unable to recognize tab page')
  3068. class YoutubePlaylistIE(InfoExtractor):
  3069. IE_DESC = 'YouTube.com playlists'
  3070. _VALID_URL = r'''(?x)(?:
  3071. (?:https?://)?
  3072. (?:\w+\.)?
  3073. (?:
  3074. (?:
  3075. youtube(?:kids)?\.com|
  3076. invidio\.us|
  3077. youtu\.be
  3078. )
  3079. /.*?\?.*?\blist=
  3080. )?
  3081. (?P<id>%(playlist_id)s)
  3082. )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
  3083. IE_NAME = 'youtube:playlist'
  3084. _TESTS = [{
  3085. 'note': 'issue #673',
  3086. 'url': 'PLBB231211A4F62143',
  3087. 'info_dict': {
  3088. 'title': '[OLD]Team Fortress 2 (Class-based LP)',
  3089. 'id': 'PLBB231211A4F62143',
  3090. 'uploader': 'Wickydoo',
  3091. 'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
  3092. },
  3093. 'playlist_mincount': 29,
  3094. }, {
  3095. 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
  3096. 'info_dict': {
  3097. 'title': 'YDL_safe_search',
  3098. 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
  3099. },
  3100. 'playlist_count': 2,
  3101. 'skip': 'This playlist is private',
  3102. }, {
  3103. 'note': 'embedded',
  3104. 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
  3105. 'playlist_count': 4,
  3106. 'info_dict': {
  3107. 'title': 'JODA15',
  3108. 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
  3109. 'uploader': 'milan',
  3110. 'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
  3111. }
  3112. }, {
  3113. 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
  3114. 'playlist_mincount': 982,
  3115. 'info_dict': {
  3116. 'title': '2018 Chinese New Singles (11/6 updated)',
  3117. 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
  3118. 'uploader': 'LBK',
  3119. 'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
  3120. }
  3121. }, {
  3122. 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
  3123. 'info_dict': {
  3124. 'id': 'yeWKywCrFtk',
  3125. 'ext': 'mp4',
  3126. 'title': 'Small Scale Baler and Braiding Rugs',
  3127. 'uploader': 'Backus-Page House Museum',
  3128. 'uploader_id': 'backuspagemuseum',
  3129. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
  3130. 'upload_date': '20161008',
  3131. 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
  3132. 'categories': ['Nonprofits & Activism'],
  3133. 'tags': list,
  3134. 'like_count': int,
  3135. 'dislike_count': int,
  3136. },
  3137. 'params': {
  3138. 'noplaylist': True,
  3139. 'skip_download': True,
  3140. },
  3141. }, {
  3142. 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
  3143. 'only_matching': True,
  3144. }, {
  3145. 'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
  3146. 'only_matching': True,
  3147. }, {
  3148. # music album playlist
  3149. 'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
  3150. 'only_matching': True,
  3151. }]
  3152. @classmethod
  3153. def suitable(cls, url):
  3154. return False if YoutubeTabIE.suitable(url) else super(
  3155. YoutubePlaylistIE, cls).suitable(url)
  3156. def _real_extract(self, url):
  3157. playlist_id = self._match_id(url)
  3158. qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
  3159. if not qs:
  3160. qs = {'list': playlist_id}
  3161. return self.url_result(
  3162. update_url_query('https://www.youtube.com/playlist', qs),
  3163. ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
  3164. class YoutubeYtUserIE(InfoExtractor):
  3165. _VALID_URL = r'ytuser:(?P<id>.+)'
  3166. _TESTS = [{
  3167. 'url': 'ytuser:phihag',
  3168. 'only_matching': True,
  3169. }]
  3170. def _real_extract(self, url):
  3171. user_id = self._match_id(url)
  3172. return self.url_result(
  3173. 'https://www.youtube.com/user/%s' % user_id,
  3174. ie=YoutubeTabIE.ie_key(), video_id=user_id)
  3175. class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
  3176. IE_NAME = 'youtube:favorites'
  3177. IE_DESC = 'YouTube.com liked videos, ":ytfav" for short (requires authentication)'
  3178. _VALID_URL = r':ytfav(?:ou?rite)?s?'
  3179. _LOGIN_REQUIRED = True
  3180. _TESTS = [{
  3181. 'url': ':ytfav',
  3182. 'only_matching': True,
  3183. }, {
  3184. 'url': ':ytfavorites',
  3185. 'only_matching': True,
  3186. }]
  3187. def _real_extract(self, url):
  3188. return self.url_result(
  3189. 'https://www.youtube.com/playlist?list=LL',
  3190. ie=YoutubeTabIE.ie_key())
  3191. class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
  3192. IE_DESC = 'YouTube.com searches'
  3193. # there doesn't appear to be a real limit, for example if you search for
  3194. # 'python' you get more than 8.000.000 results
  3195. _MAX_RESULTS = float('inf')
  3196. IE_NAME = 'youtube:search'
  3197. _SEARCH_KEY = 'ytsearch'
  3198. _SEARCH_PARAMS = None
  3199. _TESTS = []
  3200. def _entries(self, query, n):
  3201. data = {
  3202. 'context': {
  3203. 'client': {
  3204. 'clientName': 'WEB',
  3205. 'clientVersion': '2.20201021.03.00',
  3206. }
  3207. },
  3208. 'query': query,
  3209. }
  3210. if self._SEARCH_PARAMS:
  3211. data['params'] = self._SEARCH_PARAMS
  3212. total = 0
  3213. for page_num in itertools.count(1):
  3214. search = self._download_json(
  3215. 'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
  3216. video_id='query "%s"' % query,
  3217. note='Downloading page %s' % page_num,
  3218. errnote='Unable to download API page', fatal=False,
  3219. data=json.dumps(data).encode('utf8'),
  3220. headers={'content-type': 'application/json'})
  3221. if not search:
  3222. break
  3223. slr_contents = try_get(
  3224. search,
  3225. (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
  3226. lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
  3227. list)
  3228. if not slr_contents:
  3229. break
  3230. isr_contents = []
  3231. continuation_token = None
  3232. # Youtube sometimes adds promoted content to searches,
  3233. # changing the index location of videos and token.
  3234. # So we search through all entries till we find them.
  3235. for index, isr in enumerate(slr_contents):
  3236. if not isr_contents:
  3237. isr_contents = try_get(
  3238. slr_contents,
  3239. (lambda x: x[index]['itemSectionRenderer']['contents']),
  3240. list)
  3241. for content in isr_contents:
  3242. if content.get('videoRenderer') is not None:
  3243. break
  3244. else:
  3245. isr_contents = []
  3246. if continuation_token is None:
  3247. continuation_token = try_get(
  3248. slr_contents,
  3249. lambda x: x[index]['continuationItemRenderer']['continuationEndpoint']['continuationCommand'][
  3250. 'token'],
  3251. compat_str)
  3252. if continuation_token is not None and isr_contents:
  3253. break
  3254. if not isr_contents:
  3255. break
  3256. for content in isr_contents:
  3257. if not isinstance(content, dict):
  3258. continue
  3259. video = content.get('videoRenderer')
  3260. if not isinstance(video, dict):
  3261. continue
  3262. video_id = video.get('videoId')
  3263. if not video_id:
  3264. continue
  3265. title = try_get(video, lambda x: x['title']['runs'][0]['text'], compat_str)
  3266. description = try_get(video, lambda x: x['descriptionSnippet']['runs'][0]['text'], compat_str)
  3267. duration = parse_duration(try_get(video, lambda x: x['lengthText']['simpleText'], compat_str))
  3268. view_count_text = try_get(video, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
  3269. view_count = int_or_none(self._search_regex(
  3270. r'^(\d+)', re.sub(r'\s', '', view_count_text),
  3271. 'view count', default=None))
  3272. uploader = try_get(video, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
  3273. total += 1
  3274. yield {
  3275. '_type': 'url_transparent',
  3276. 'ie_key': YoutubeIE.ie_key(),
  3277. 'id': video_id,
  3278. 'url': video_id,
  3279. 'title': title,
  3280. 'description': description,
  3281. 'duration': duration,
  3282. 'view_count': view_count,
  3283. 'uploader': uploader,
  3284. }
  3285. if total == n:
  3286. return
  3287. if not continuation_token:
  3288. break
  3289. data['continuation'] = continuation_token
  3290. def _get_n_results(self, query, n):
  3291. """Get a specified number of results for a query"""
  3292. return self.playlist_result(self._entries(query, n), query)
  3293. class YoutubeSearchDateIE(YoutubeSearchIE):
  3294. IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
  3295. _SEARCH_KEY = 'ytsearchdate'
  3296. IE_DESC = 'YouTube.com searches, newest videos first'
  3297. _SEARCH_PARAMS = 'CAI%3D'
  3298. class YoutubeSearchURLIE(YoutubeSearchIE):
  3299. IE_DESC = 'YouTube.com search URLs'
  3300. IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
  3301. _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
  3302. # _MAX_RESULTS = 100
  3303. _TESTS = [{
  3304. 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
  3305. 'playlist_mincount': 5,
  3306. 'info_dict': {
  3307. 'title': 'youtube-dl test video',
  3308. }
  3309. }, {
  3310. 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
  3311. 'only_matching': True,
  3312. }]
  3313. @classmethod
  3314. def _make_valid_url(cls):
  3315. return cls._VALID_URL
  3316. def _real_extract(self, url):
  3317. qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
  3318. query = (qs.get('search_query') or qs.get('q'))[0]
  3319. self._SEARCH_PARAMS = qs.get('sp', ('',))[0]
  3320. return self._get_n_results(query, self._MAX_RESULTS)
  3321. class YoutubeFeedsInfoExtractor(YoutubeTabIE):
  3322. """
  3323. Base class for feed extractors
  3324. Subclasses must define the _FEED_NAME property.
  3325. """
  3326. _LOGIN_REQUIRED = True
  3327. # _MAX_PAGES = 5
  3328. _TESTS = []
  3329. @property
  3330. def IE_NAME(self):
  3331. return 'youtube:%s' % self._FEED_NAME
  3332. def _real_initialize(self):
  3333. self._login()
  3334. def _real_extract(self, url):
  3335. return self.url_result(
  3336. 'https://www.youtube.com/feed/%s' % self._FEED_NAME,
  3337. ie=YoutubeTabIE.ie_key())
  3338. class YoutubeWatchLaterIE(InfoExtractor):
  3339. IE_NAME = 'youtube:watchlater'
  3340. IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
  3341. _VALID_URL = r':ytwatchlater'
  3342. _TESTS = [{
  3343. 'url': ':ytwatchlater',
  3344. 'only_matching': True,
  3345. }]
  3346. def _real_extract(self, url):
  3347. return self.url_result(
  3348. 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
  3349. class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
  3350. IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
  3351. _VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
  3352. _FEED_NAME = 'recommended'
  3353. _TESTS = [{
  3354. 'url': ':ytrec',
  3355. 'only_matching': True,
  3356. }, {
  3357. 'url': ':ytrecommended',
  3358. 'only_matching': True,
  3359. }, {
  3360. 'url': 'https://youtube.com',
  3361. 'only_matching': True,
  3362. }]
  3363. class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
  3364. IE_DESC = 'YouTube.com subscriptions feed, ":ytsubs" for short (requires authentication)'
  3365. _VALID_URL = r':ytsub(?:scription)?s?'
  3366. _FEED_NAME = 'subscriptions'
  3367. _TESTS = [{
  3368. 'url': ':ytsubs',
  3369. 'only_matching': True,
  3370. }, {
  3371. 'url': ':ytsubscriptions',
  3372. 'only_matching': True,
  3373. }]
  3374. class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
  3375. IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
  3376. _VALID_URL = r':ythistory'
  3377. _FEED_NAME = 'history'
  3378. _TESTS = [{
  3379. 'url': ':ythistory',
  3380. 'only_matching': True,
  3381. }]
  3382. class YoutubeTruncatedURLIE(InfoExtractor):
  3383. IE_NAME = 'youtube:truncated_url'
  3384. IE_DESC = False # Do not list
  3385. _VALID_URL = r'''(?x)
  3386. (?:https?://)?
  3387. (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
  3388. (?:watch\?(?:
  3389. feature=[a-z_]+|
  3390. annotation_id=annotation_[^&]+|
  3391. x-yt-cl=[0-9]+|
  3392. hl=[^&]*|
  3393. t=[0-9]+
  3394. )?
  3395. |
  3396. attribution_link\?a=[^&]+
  3397. )
  3398. $
  3399. '''
  3400. _TESTS = [{
  3401. 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
  3402. 'only_matching': True,
  3403. }, {
  3404. 'url': 'https://www.youtube.com/watch?',
  3405. 'only_matching': True,
  3406. }, {
  3407. 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
  3408. 'only_matching': True,
  3409. }, {
  3410. 'url': 'https://www.youtube.com/watch?feature=foo',
  3411. 'only_matching': True,
  3412. }, {
  3413. 'url': 'https://www.youtube.com/watch?hl=en-GB',
  3414. 'only_matching': True,
  3415. }, {
  3416. 'url': 'https://www.youtube.com/watch?t=2372',
  3417. 'only_matching': True,
  3418. }]
  3419. def _real_extract(self, url):
  3420. raise ExtractorError(
  3421. 'Did you forget to quote the URL? Remember that & is a meta '
  3422. 'character in most shells, so you want to put the URL in quotes, '
  3423. 'like youtube-dl '
  3424. '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
  3425. ' or simply youtube-dl BaW_jenozKc .',
  3426. expected=True)
  3427. class YoutubeTruncatedIDIE(InfoExtractor):
  3428. IE_NAME = 'youtube:truncated_id'
  3429. IE_DESC = False # Do not list
  3430. _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
  3431. _TESTS = [{
  3432. 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
  3433. 'only_matching': True,
  3434. }]
  3435. def _real_extract(self, url):
  3436. video_id = self._match_id(url)
  3437. raise ExtractorError(
  3438. 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
  3439. expected=True)
# Do Youtube show urls even exist anymore? I couldn't find any
# NOTE(review): deliberately dead code — the extractor below is kept only as
# a raw-string literal so it is never registered; remove or revive as needed.
r'''
class YoutubeShowIE(YoutubeTabIE):
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'https://www.youtube.com/show/airdisasters',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        }
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        return super(YoutubeShowIE, self)._real_extract(
            'https://www.youtube.com/show/%s/playlists' % playlist_id)
'''