You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

3671 lines
164 KiB

  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import itertools
  4. import json
  5. import os.path
  6. import random
  7. import re
  8. import time
  9. import traceback
  10. from .common import InfoExtractor, SearchInfoExtractor
  11. from ..jsinterp import JSInterpreter
  12. from ..swfinterp import SWFInterpreter
  13. from ..compat import (
  14. compat_chr,
  15. compat_kwargs,
  16. compat_parse_qs,
  17. compat_urllib_parse_unquote,
  18. compat_urllib_parse_unquote_plus,
  19. compat_urllib_parse_urlencode,
  20. compat_urllib_parse_urlparse,
  21. compat_urlparse,
  22. compat_str,
  23. )
  24. from ..utils import (
  25. bool_or_none,
  26. clean_html,
  27. error_to_compat_str,
  28. ExtractorError,
  29. float_or_none,
  30. get_element_by_id,
  31. int_or_none,
  32. mimetype2ext,
  33. parse_codecs,
  34. parse_count,
  35. parse_duration,
  36. remove_quotes,
  37. remove_start,
  38. smuggle_url,
  39. str_or_none,
  40. str_to_int,
  41. try_get,
  42. unescapeHTML,
  43. unified_strdate,
  44. unsmuggle_url,
  45. update_url_query,
  46. uppercase_escape,
  47. url_or_none,
  48. urlencode_postdata,
  49. urljoin,
  50. )
  51. class YoutubeBaseInfoExtractor(InfoExtractor):
  52. """Provide base functions for Youtube extractors"""
  53. _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
  54. _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
  55. _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
  56. _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
  57. _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
  58. _RESERVED_NAMES = (
  59. r'course|embed|watch|w|results|storefront|'
  60. r'shared|index|account|reporthistory|t/terms|about|upload|signin|logout|'
  61. r'feed/(watch_later|history|subscriptions|library|trending|recommended)')
  62. _NETRC_MACHINE = 'youtube'
  63. # If True it will raise an error if no login info is provided
  64. _LOGIN_REQUIRED = False
  65. _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,})'
  66. _YOUTUBE_CLIENT_HEADERS = {
  67. 'x-youtube-client-name': '1',
  68. 'x-youtube-client-version': '1.20200609.04.02',
  69. }
  70. def _set_language(self):
  71. self._set_cookie(
  72. '.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
  73. # YouTube sets the expire time to about two months
  74. expire_time=time.time() + 2 * 30 * 24 * 3600)
  75. def _ids_to_results(self, ids):
  76. return [
  77. self.url_result(vid_id, 'Youtube', video_id=vid_id)
  78. for vid_id in ids]
  79. def _login(self):
  80. """
  81. Attempt to log in to YouTube.
  82. True is returned if successful or skipped.
  83. False is returned if login failed.
  84. If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
  85. """
  86. username, password = self._get_login_info()
  87. # No authentication to be performed
  88. if username is None:
  89. if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
  90. raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
  91. if self._downloader.params.get('cookiefile') and False: # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them.
  92. self.to_screen('[Cookies] Reminder - Make sure to always use up to date cookies!')
  93. return True
  94. login_page = self._download_webpage(
  95. self._LOGIN_URL, None,
  96. note='Downloading login page',
  97. errnote='unable to fetch login page', fatal=False)
  98. if login_page is False:
  99. return
  100. login_form = self._hidden_inputs(login_page)
  101. def req(url, f_req, note, errnote):
  102. data = login_form.copy()
  103. data.update({
  104. 'pstMsg': 1,
  105. 'checkConnection': 'youtube',
  106. 'checkedDomains': 'youtube',
  107. 'hl': 'en',
  108. 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
  109. 'f.req': json.dumps(f_req),
  110. 'flowName': 'GlifWebSignIn',
  111. 'flowEntry': 'ServiceLogin',
  112. # TODO: reverse actual botguard identifier generation algo
  113. 'bgRequest': '["identifier",""]',
  114. })
  115. return self._download_json(
  116. url, None, note=note, errnote=errnote,
  117. transform_source=lambda s: re.sub(r'^[^[]*', '', s),
  118. fatal=False,
  119. data=urlencode_postdata(data), headers={
  120. 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
  121. 'Google-Accounts-XSRF': 1,
  122. })
  123. def warn(message):
  124. self._downloader.report_warning(message)
  125. lookup_req = [
  126. username,
  127. None, [], None, 'US', None, None, 2, False, True,
  128. [
  129. None, None,
  130. [2, 1, None, 1,
  131. 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
  132. None, [], 4],
  133. 1, [None, None, []], None, None, None, True
  134. ],
  135. username,
  136. ]
  137. lookup_results = req(
  138. self._LOOKUP_URL, lookup_req,
  139. 'Looking up account info', 'Unable to look up account info')
  140. if lookup_results is False:
  141. return False
  142. user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
  143. if not user_hash:
  144. warn('Unable to extract user hash')
  145. return False
  146. challenge_req = [
  147. user_hash,
  148. None, 1, None, [1, None, None, None, [password, None, True]],
  149. [
  150. None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
  151. 1, [None, None, []], None, None, None, True
  152. ]]
  153. challenge_results = req(
  154. self._CHALLENGE_URL, challenge_req,
  155. 'Logging in', 'Unable to log in')
  156. if challenge_results is False:
  157. return
  158. login_res = try_get(challenge_results, lambda x: x[0][5], list)
  159. if login_res:
  160. login_msg = try_get(login_res, lambda x: x[5], compat_str)
  161. warn(
  162. 'Unable to login: %s' % 'Invalid password'
  163. if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
  164. return False
  165. res = try_get(challenge_results, lambda x: x[0][-1], list)
  166. if not res:
  167. warn('Unable to extract result entry')
  168. return False
  169. login_challenge = try_get(res, lambda x: x[0][0], list)
  170. if login_challenge:
  171. challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
  172. if challenge_str == 'TWO_STEP_VERIFICATION':
  173. # SEND_SUCCESS - TFA code has been successfully sent to phone
  174. # QUOTA_EXCEEDED - reached the limit of TFA codes
  175. status = try_get(login_challenge, lambda x: x[5], compat_str)
  176. if status == 'QUOTA_EXCEEDED':
  177. warn('Exceeded the limit of TFA codes, try later')
  178. return False
  179. tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
  180. if not tl:
  181. warn('Unable to extract TL')
  182. return False
  183. tfa_code = self._get_tfa_info('2-step verification code')
  184. if not tfa_code:
  185. warn(
  186. 'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
  187. '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
  188. return False
  189. tfa_code = remove_start(tfa_code, 'G-')
  190. tfa_req = [
  191. user_hash, None, 2, None,
  192. [
  193. 9, None, None, None, None, None, None, None,
  194. [None, tfa_code, True, 2]
  195. ]]
  196. tfa_results = req(
  197. self._TFA_URL.format(tl), tfa_req,
  198. 'Submitting TFA code', 'Unable to submit TFA code')
  199. if tfa_results is False:
  200. return False
  201. tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
  202. if tfa_res:
  203. tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
  204. warn(
  205. 'Unable to finish TFA: %s' % 'Invalid TFA code'
  206. if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
  207. return False
  208. check_cookie_url = try_get(
  209. tfa_results, lambda x: x[0][-1][2], compat_str)
  210. else:
  211. CHALLENGES = {
  212. 'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
  213. 'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
  214. 'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
  215. }
  216. challenge = CHALLENGES.get(
  217. challenge_str,
  218. '%s returned error %s.' % (self.IE_NAME, challenge_str))
  219. warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
  220. return False
  221. else:
  222. check_cookie_url = try_get(res, lambda x: x[2], compat_str)
  223. if not check_cookie_url:
  224. warn('Unable to extract CheckCookie URL')
  225. return False
  226. check_cookie_results = self._download_webpage(
  227. check_cookie_url, None, 'Checking cookie', fatal=False)
  228. if check_cookie_results is False:
  229. return False
  230. if 'https://myaccount.google.com/' not in check_cookie_results:
  231. warn('Unable to log in')
  232. return False
  233. return True
  234. def _download_webpage_handle(self, *args, **kwargs):
  235. query = kwargs.get('query', {}).copy()
  236. kwargs['query'] = query
  237. return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
  238. *args, **compat_kwargs(kwargs))
  239. def _get_yt_initial_data(self, video_id, webpage):
  240. config = self._search_regex(
  241. (r'window\["ytInitialData"\]\s*=\s*(.*?)(?<=});',
  242. r'var\s+ytInitialData\s*=\s*(.*?)(?<=});'),
  243. webpage, 'ytInitialData', default=None)
  244. if config:
  245. return self._parse_json(
  246. uppercase_escape(config), video_id, fatal=False)
  247. def _real_initialize(self):
  248. if self._downloader is None:
  249. return
  250. self._set_language()
  251. if not self._login():
  252. return
  253. _DEFAULT_API_DATA = {
  254. 'context': {
  255. 'client': {
  256. 'clientName': 'WEB',
  257. 'clientVersion': '2.20201021.03.00',
  258. }
  259. },
  260. }
  261. _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
  262. def _call_api(self, ep, query, video_id):
  263. data = self._DEFAULT_API_DATA.copy()
  264. data.update(query)
  265. response = self._download_json(
  266. 'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
  267. note='Downloading API JSON', errnote='Unable to download API page',
  268. data=json.dumps(data).encode('utf8'),
  269. headers={'content-type': 'application/json'},
  270. query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
  271. return response
  272. def _extract_yt_initial_data(self, video_id, webpage):
  273. return self._parse_json(
  274. self._search_regex(
  275. (r'%s\s*\n' % self._YT_INITIAL_DATA_RE,
  276. self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
  277. video_id)
  278. class YoutubeIE(YoutubeBaseInfoExtractor):
  279. IE_DESC = 'YouTube.com'
  280. _VALID_URL = r"""(?x)^
  281. (
  282. (?:https?://|//) # http(s):// or protocol-independent URL
  283. (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
  284. (?:www\.)?deturl\.com/www\.youtube\.com/|
  285. (?:www\.)?pwnyoutube\.com/|
  286. (?:www\.)?hooktube\.com/|
  287. (?:www\.)?yourepeat\.com/|
  288. tube\.majestyc\.net/|
  289. # Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
  290. (?:(?:www|dev)\.)?invidio\.us/|
  291. (?:(?:www|no)\.)?invidiou\.sh/|
  292. (?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
  293. (?:www\.)?invidious\.kabi\.tk/|
  294. (?:www\.)?invidious\.13ad\.de/|
  295. (?:www\.)?invidious\.mastodon\.host/|
  296. (?:www\.)?invidious\.nixnet\.xyz/|
  297. (?:www\.)?invidious\.drycat\.fr/|
  298. (?:www\.)?tube\.poal\.co/|
  299. (?:www\.)?vid\.wxzm\.sx/|
  300. (?:www\.)?yewtu\.be/|
  301. (?:www\.)?yt\.elukerio\.org/|
  302. (?:www\.)?yt\.lelux\.fi/|
  303. (?:www\.)?invidious\.ggc-project\.de/|
  304. (?:www\.)?yt\.maisputain\.ovh/|
  305. (?:www\.)?invidious\.13ad\.de/|
  306. (?:www\.)?invidious\.toot\.koeln/|
  307. (?:www\.)?invidious\.fdn\.fr/|
  308. (?:www\.)?watch\.nettohikari\.com/|
  309. (?:www\.)?kgg2m7yk5aybusll\.onion/|
  310. (?:www\.)?qklhadlycap4cnod\.onion/|
  311. (?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
  312. (?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
  313. (?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
  314. (?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
  315. (?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
  316. (?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
  317. youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
  318. (?:.*?\#/)? # handle anchor (#/) redirect urls
  319. (?: # the various things that can precede the ID:
  320. (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
  321. |(?: # or the v= param in all its forms
  322. (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
  323. (?:\?|\#!?) # the params delimiter ? or # or #!
  324. (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
  325. v=
  326. )
  327. ))
  328. |(?:
  329. youtu\.be| # just youtu.be/xxxx
  330. vid\.plus| # or vid.plus/xxxx
  331. zwearz\.com/watch| # or zwearz.com/watch/xxxx
  332. )/
  333. |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
  334. )
  335. )? # all until now is optional -> you can pass the naked ID
  336. (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
  337. (?!.*?\blist=
  338. (?:
  339. %(playlist_id)s| # combined list/video URLs are handled by the playlist IE
  340. WL # WL are handled by the watch later IE
  341. )
  342. )
  343. (?(1).+)? # if we found the ID, everything can follow
  344. $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
  345. _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
  346. _PLAYER_INFO_RE = (
  347. r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
  348. r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
  349. )
  350. _formats = {
  351. '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
  352. '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
  353. '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
  354. '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
  355. '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
  356. '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
  357. '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  358. '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  359. # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
  360. '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
  361. '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
  362. '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
  363. '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
  364. '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
  365. '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
  366. '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
  367. '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  368. '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  369. # 3D videos
  370. '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
  371. '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
  372. '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
  373. '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
  374. '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
  375. '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
  376. '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
  377. # Apple HTTP Live Streaming
  378. '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
  379. '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
  380. '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
  381. '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
  382. '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
  383. '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
  384. '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
  385. '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
  386. # DASH mp4 video
  387. '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
  388. '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
  389. '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
  390. '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
  391. '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
  392. '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
  393. '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
  394. '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
  395. '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
  396. '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
  397. '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
  398. '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
  399. # Dash mp4 audio
  400. '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
  401. '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
  402. '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
  403. '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
  404. '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
  405. '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
  406. '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
  407. # Dash webm
  408. '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  409. '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  410. '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  411. '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  412. '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  413. '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  414. '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
  415. '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  416. '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  417. '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  418. '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  419. '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  420. '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  421. '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  422. '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  423. # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
  424. '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  425. '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  426. '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  427. '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  428. '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  429. '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  430. # Dash webm audio
  431. '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
  432. '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
  433. # Dash webm audio with opus inside
  434. '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
  435. '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
  436. '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
  437. # RTMP (unnamed)
  438. '_rtmp': {'protocol': 'rtmp'},
  439. # av01 video only formats sometimes served with "unknown" codecs
  440. '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  441. '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  442. '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  443. '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  444. }
  445. _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
  446. _GEO_BYPASS = False
  447. IE_NAME = 'youtube'
  448. _TESTS = [
  449. {
  450. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
  451. 'info_dict': {
  452. 'id': 'BaW_jenozKc',
  453. 'ext': 'mp4',
  454. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  455. 'uploader': 'Philipp Hagemeister',
  456. 'uploader_id': 'phihag',
  457. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
  458. 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
  459. 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
  460. 'upload_date': '20121002',
  461. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  462. 'categories': ['Science & Technology'],
  463. 'tags': ['youtube-dl'],
  464. 'duration': 10,
  465. 'view_count': int,
  466. 'like_count': int,
  467. 'dislike_count': int,
  468. 'start_time': 1,
  469. 'end_time': 9,
  470. }
  471. },
  472. {
  473. 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
  474. 'note': 'Embed-only video (#1746)',
  475. 'info_dict': {
  476. 'id': 'yZIXLfi8CZQ',
  477. 'ext': 'mp4',
  478. 'upload_date': '20120608',
  479. 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
  480. 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
  481. 'uploader': 'SET India',
  482. 'uploader_id': 'setindia',
  483. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
  484. 'age_limit': 18,
  485. }
  486. },
  487. {
  488. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
  489. 'note': 'Use the first video ID in the URL',
  490. 'info_dict': {
  491. 'id': 'BaW_jenozKc',
  492. 'ext': 'mp4',
  493. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  494. 'uploader': 'Philipp Hagemeister',
  495. 'uploader_id': 'phihag',
  496. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
  497. 'upload_date': '20121002',
  498. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  499. 'categories': ['Science & Technology'],
  500. 'tags': ['youtube-dl'],
  501. 'duration': 10,
  502. 'view_count': int,
  503. 'like_count': int,
  504. 'dislike_count': int,
  505. },
  506. 'params': {
  507. 'skip_download': True,
  508. },
  509. },
  510. {
  511. 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
  512. 'note': '256k DASH audio (format 141) via DASH manifest',
  513. 'info_dict': {
  514. 'id': 'a9LDPn-MO4I',
  515. 'ext': 'm4a',
  516. 'upload_date': '20121002',
  517. 'uploader_id': '8KVIDEO',
  518. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
  519. 'description': '',
  520. 'uploader': '8KVIDEO',
  521. 'title': 'UHDTV TEST 8K VIDEO.mp4'
  522. },
  523. 'params': {
  524. 'youtube_include_dash_manifest': True,
  525. 'format': '141',
  526. },
  527. 'skip': 'format 141 not served anymore',
  528. },
  529. # DASH manifest with encrypted signature
  530. {
  531. 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
  532. 'info_dict': {
  533. 'id': 'IB3lcPjvWLA',
  534. 'ext': 'm4a',
  535. 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
  536. 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
  537. 'duration': 244,
  538. 'uploader': 'AfrojackVEVO',
  539. 'uploader_id': 'AfrojackVEVO',
  540. 'upload_date': '20131011',
  541. },
  542. 'params': {
  543. 'youtube_include_dash_manifest': True,
  544. 'format': '141/bestaudio[ext=m4a]',
  545. },
  546. },
  547. # Controversy video
  548. {
  549. 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
  550. 'info_dict': {
  551. 'id': 'T4XJQO3qol8',
  552. 'ext': 'mp4',
  553. 'duration': 219,
  554. 'upload_date': '20100909',
  555. 'uploader': 'Amazing Atheist',
  556. 'uploader_id': 'TheAmazingAtheist',
  557. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
  558. 'title': 'Burning Everyone\'s Koran',
  559. 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
  560. }
  561. },
  562. # Normal age-gate video (embed allowed)
  563. {
  564. 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
  565. 'info_dict': {
  566. 'id': 'HtVdAasjOgU',
  567. 'ext': 'mp4',
  568. 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
  569. 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
  570. 'duration': 142,
  571. 'uploader': 'The Witcher',
  572. 'uploader_id': 'WitcherGame',
  573. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
  574. 'upload_date': '20140605',
  575. 'age_limit': 18,
  576. },
  577. },
  578. # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
  579. # YouTube Red ad is not captured for creator
  580. {
  581. 'url': '__2ABJjxzNo',
  582. 'info_dict': {
  583. 'id': '__2ABJjxzNo',
  584. 'ext': 'mp4',
  585. 'duration': 266,
  586. 'upload_date': '20100430',
  587. 'uploader_id': 'deadmau5',
  588. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
  589. 'creator': 'Dada Life, deadmau5',
  590. 'description': 'md5:12c56784b8032162bb936a5f76d55360',
  591. 'uploader': 'deadmau5',
  592. 'title': 'Deadmau5 - Some Chords (HD)',
  593. 'alt_title': 'This Machine Kills Some Chords',
  594. },
  595. 'expected_warnings': [
  596. 'DASH manifest missing',
  597. ]
  598. },
  599. # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
  600. {
  601. 'url': 'lqQg6PlCWgI',
  602. 'info_dict': {
  603. 'id': 'lqQg6PlCWgI',
  604. 'ext': 'mp4',
  605. 'duration': 6085,
  606. 'upload_date': '20150827',
  607. 'uploader_id': 'olympic',
  608. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
  609. 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
  610. 'uploader': 'Olympic',
  611. 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
  612. },
  613. 'params': {
  614. 'skip_download': 'requires avconv',
  615. }
  616. },
  617. # Non-square pixels
  618. {
  619. 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
  620. 'info_dict': {
  621. 'id': '_b-2C3KPAM0',
  622. 'ext': 'mp4',
  623. 'stretched_ratio': 16 / 9.,
  624. 'duration': 85,
  625. 'upload_date': '20110310',
  626. 'uploader_id': 'AllenMeow',
  627. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
  628. 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
  629. 'uploader': '孫ᄋᄅ',
  630. 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
  631. },
  632. },
  633. # url_encoded_fmt_stream_map is empty string
  634. {
  635. 'url': 'qEJwOuvDf7I',
  636. 'info_dict': {
  637. 'id': 'qEJwOuvDf7I',
  638. 'ext': 'webm',
  639. 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
  640. 'description': '',
  641. 'upload_date': '20150404',
  642. 'uploader_id': 'spbelect',
  643. 'uploader': 'Наблюдатели Петербурга',
  644. },
  645. 'params': {
  646. 'skip_download': 'requires avconv',
  647. },
  648. 'skip': 'This live event has ended.',
  649. },
  650. # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
  651. {
  652. 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
  653. 'info_dict': {
  654. 'id': 'FIl7x6_3R5Y',
  655. 'ext': 'webm',
  656. 'title': 'md5:7b81415841e02ecd4313668cde88737a',
  657. 'description': 'md5:116377fd2963b81ec4ce64b542173306',
  658. 'duration': 220,
  659. 'upload_date': '20150625',
  660. 'uploader_id': 'dorappi2000',
  661. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
  662. 'uploader': 'dorappi2000',
  663. 'formats': 'mincount:31',
  664. },
  665. 'skip': 'not actual anymore',
  666. },
  667. # DASH manifest with segment_list
  668. {
  669. 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
  670. 'md5': '8ce563a1d667b599d21064e982ab9e31',
  671. 'info_dict': {
  672. 'id': 'CsmdDsKjzN8',
  673. 'ext': 'mp4',
  674. 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
  675. 'uploader': 'Airtek',
  676. 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
  677. 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
  678. 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
  679. },
  680. 'params': {
  681. 'youtube_include_dash_manifest': True,
  682. 'format': '135', # bestvideo
  683. },
  684. 'skip': 'This live event has ended.',
  685. },
  686. {
  687. # Multifeed videos (multiple cameras), URL is for Main Camera
  688. 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
  689. 'info_dict': {
  690. 'id': 'jqWvoWXjCVs',
  691. 'title': 'teamPGP: Rocket League Noob Stream',
  692. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  693. },
  694. 'playlist': [{
  695. 'info_dict': {
  696. 'id': 'jqWvoWXjCVs',
  697. 'ext': 'mp4',
  698. 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
  699. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  700. 'duration': 7335,
  701. 'upload_date': '20150721',
  702. 'uploader': 'Beer Games Beer',
  703. 'uploader_id': 'beergamesbeer',
  704. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  705. 'license': 'Standard YouTube License',
  706. },
  707. }, {
  708. 'info_dict': {
  709. 'id': '6h8e8xoXJzg',
  710. 'ext': 'mp4',
  711. 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
  712. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  713. 'duration': 7337,
  714. 'upload_date': '20150721',
  715. 'uploader': 'Beer Games Beer',
  716. 'uploader_id': 'beergamesbeer',
  717. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  718. 'license': 'Standard YouTube License',
  719. },
  720. }, {
  721. 'info_dict': {
  722. 'id': 'PUOgX5z9xZw',
  723. 'ext': 'mp4',
  724. 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
  725. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  726. 'duration': 7337,
  727. 'upload_date': '20150721',
  728. 'uploader': 'Beer Games Beer',
  729. 'uploader_id': 'beergamesbeer',
  730. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  731. 'license': 'Standard YouTube License',
  732. },
  733. }, {
  734. 'info_dict': {
  735. 'id': 'teuwxikvS5k',
  736. 'ext': 'mp4',
  737. 'title': 'teamPGP: Rocket League Noob Stream (zim)',
  738. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  739. 'duration': 7334,
  740. 'upload_date': '20150721',
  741. 'uploader': 'Beer Games Beer',
  742. 'uploader_id': 'beergamesbeer',
  743. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  744. 'license': 'Standard YouTube License',
  745. },
  746. }],
  747. 'params': {
  748. 'skip_download': True,
  749. },
  750. 'skip': 'This video is not available.',
  751. },
  752. {
  753. # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
  754. 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
  755. 'info_dict': {
  756. 'id': 'gVfLd0zydlo',
  757. 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
  758. },
  759. 'playlist_count': 2,
  760. 'skip': 'Not multifeed anymore',
  761. },
  762. {
  763. 'url': 'https://vid.plus/FlRa-iH7PGw',
  764. 'only_matching': True,
  765. },
  766. {
  767. 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
  768. 'only_matching': True,
  769. },
  770. {
  771. # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  772. # Also tests cut-off URL expansion in video description (see
  773. # https://github.com/ytdl-org/youtube-dl/issues/1892,
  774. # https://github.com/ytdl-org/youtube-dl/issues/8164)
  775. 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
  776. 'info_dict': {
  777. 'id': 'lsguqyKfVQg',
  778. 'ext': 'mp4',
  779. 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
  780. 'alt_title': 'Dark Walk - Position Music',
  781. 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
  782. 'duration': 133,
  783. 'upload_date': '20151119',
  784. 'uploader_id': 'IronSoulElf',
  785. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
  786. 'uploader': 'IronSoulElf',
  787. 'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
  788. 'track': 'Dark Walk - Position Music',
  789. 'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
  790. 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
  791. },
  792. 'params': {
  793. 'skip_download': True,
  794. },
  795. },
  796. {
  797. # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  798. 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
  799. 'only_matching': True,
  800. },
  801. {
  802. # Video with yt:stretch=17:0
  803. 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
  804. 'info_dict': {
  805. 'id': 'Q39EVAstoRM',
  806. 'ext': 'mp4',
  807. 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
  808. 'description': 'md5:ee18a25c350637c8faff806845bddee9',
  809. 'upload_date': '20151107',
  810. 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
  811. 'uploader': 'CH GAMER DROID',
  812. },
  813. 'params': {
  814. 'skip_download': True,
  815. },
  816. 'skip': 'This video does not exist.',
  817. },
  818. {
  819. # Video licensed under Creative Commons
  820. 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
  821. 'info_dict': {
  822. 'id': 'M4gD1WSo5mA',
  823. 'ext': 'mp4',
  824. 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
  825. 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
  826. 'duration': 721,
  827. 'upload_date': '20150127',
  828. 'uploader_id': 'BerkmanCenter',
  829. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
  830. 'uploader': 'The Berkman Klein Center for Internet & Society',
  831. 'license': 'Creative Commons Attribution license (reuse allowed)',
  832. },
  833. 'params': {
  834. 'skip_download': True,
  835. },
  836. },
  837. {
  838. # Channel-like uploader_url
  839. 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
  840. 'info_dict': {
  841. 'id': 'eQcmzGIKrzg',
  842. 'ext': 'mp4',
  843. 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
  844. 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
  845. 'duration': 4060,
  846. 'upload_date': '20151119',
  847. 'uploader': 'Bernie Sanders',
  848. 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
  849. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
  850. 'license': 'Creative Commons Attribution license (reuse allowed)',
  851. },
  852. 'params': {
  853. 'skip_download': True,
  854. },
  855. },
  856. {
  857. 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
  858. 'only_matching': True,
  859. },
  860. {
  861. # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
  862. 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
  863. 'only_matching': True,
  864. },
  865. {
  866. # Rental video preview
  867. 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
  868. 'info_dict': {
  869. 'id': 'uGpuVWrhIzE',
  870. 'ext': 'mp4',
  871. 'title': 'Piku - Trailer',
  872. 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
  873. 'upload_date': '20150811',
  874. 'uploader': 'FlixMatrix',
  875. 'uploader_id': 'FlixMatrixKaravan',
  876. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
  877. 'license': 'Standard YouTube License',
  878. },
  879. 'params': {
  880. 'skip_download': True,
  881. },
  882. 'skip': 'This video is not available.',
  883. },
  884. {
  885. # YouTube Red video with episode data
  886. 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
  887. 'info_dict': {
  888. 'id': 'iqKdEhx-dD4',
  889. 'ext': 'mp4',
  890. 'title': 'Isolation - Mind Field (Ep 1)',
  891. 'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
  892. 'duration': 2085,
  893. 'upload_date': '20170118',
  894. 'uploader': 'Vsauce',
  895. 'uploader_id': 'Vsauce',
  896. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
  897. 'series': 'Mind Field',
  898. 'season_number': 1,
  899. 'episode_number': 1,
  900. },
  901. 'params': {
  902. 'skip_download': True,
  903. },
  904. 'expected_warnings': [
  905. 'Skipping DASH manifest',
  906. ],
  907. },
  908. {
  909. # The following content has been identified by the YouTube community
  910. # as inappropriate or offensive to some audiences.
  911. 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
  912. 'info_dict': {
  913. 'id': '6SJNVb0GnPI',
  914. 'ext': 'mp4',
  915. 'title': 'Race Differences in Intelligence',
  916. 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
  917. 'duration': 965,
  918. 'upload_date': '20140124',
  919. 'uploader': 'New Century Foundation',
  920. 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
  921. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
  922. },
  923. 'params': {
  924. 'skip_download': True,
  925. },
  926. },
  927. {
  928. # itag 212
  929. 'url': '1t24XAntNCY',
  930. 'only_matching': True,
  931. },
  932. {
  933. # geo restricted to JP
  934. 'url': 'sJL6WA-aGkQ',
  935. 'only_matching': True,
  936. },
  937. {
  938. 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
  939. 'only_matching': True,
  940. },
  941. {
  942. # DRM protected
  943. 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
  944. 'only_matching': True,
  945. },
  946. {
  947. # Video with unsupported adaptive stream type formats
  948. 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
  949. 'info_dict': {
  950. 'id': 'Z4Vy8R84T1U',
  951. 'ext': 'mp4',
  952. 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
  953. 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
  954. 'duration': 433,
  955. 'upload_date': '20130923',
  956. 'uploader': 'Amelia Putri Harwita',
  957. 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
  958. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
  959. 'formats': 'maxcount:10',
  960. },
  961. 'params': {
  962. 'skip_download': True,
  963. 'youtube_include_dash_manifest': False,
  964. },
  965. 'skip': 'not actual anymore',
  966. },
  967. {
  968. # Youtube Music Auto-generated description
  969. 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
  970. 'info_dict': {
  971. 'id': 'MgNrAu2pzNs',
  972. 'ext': 'mp4',
  973. 'title': 'Voyeur Girl',
  974. 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
  975. 'upload_date': '20190312',
  976. 'uploader': 'Stephen - Topic',
  977. 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
  978. 'artist': 'Stephen',
  979. 'track': 'Voyeur Girl',
  980. 'album': 'it\'s too much love to know my dear',
  981. 'release_date': '20190313',
  982. 'release_year': 2019,
  983. },
  984. 'params': {
  985. 'skip_download': True,
  986. },
  987. },
  988. {
  989. 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
  990. 'only_matching': True,
  991. },
  992. {
  993. # invalid -> valid video id redirection
  994. 'url': 'DJztXj2GPfl',
  995. 'info_dict': {
  996. 'id': 'DJztXj2GPfk',
  997. 'ext': 'mp4',
  998. 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
  999. 'description': 'md5:bf577a41da97918e94fa9798d9228825',
  1000. 'upload_date': '20090125',
  1001. 'uploader': 'Prochorowka',
  1002. 'uploader_id': 'Prochorowka',
  1003. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
  1004. 'artist': 'Panjabi MC',
  1005. 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
  1006. 'album': 'Beware of the Boys (Mundian To Bach Ke)',
  1007. },
  1008. 'params': {
  1009. 'skip_download': True,
  1010. },
  1011. },
  1012. {
  1013. # empty description results in an empty string
  1014. 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
  1015. 'info_dict': {
  1016. 'id': 'x41yOUIvK2k',
  1017. 'ext': 'mp4',
  1018. 'title': 'IMG 3456',
  1019. 'description': '',
  1020. 'upload_date': '20170613',
  1021. 'uploader_id': 'ElevageOrVert',
  1022. 'uploader': 'ElevageOrVert',
  1023. },
  1024. 'params': {
  1025. 'skip_download': True,
  1026. },
  1027. },
  1028. {
  1029. # with '};' inside yt initial data (see https://github.com/ytdl-org/youtube-dl/issues/27093)
  1030. 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
  1031. 'info_dict': {
  1032. 'id': 'CHqg6qOn4no',
  1033. 'ext': 'mp4',
  1034. 'title': 'Part 77 Sort a list of simple types in c#',
  1035. 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
  1036. 'upload_date': '20130831',
  1037. 'uploader_id': 'kudvenkat',
  1038. 'uploader': 'kudvenkat',
  1039. },
  1040. 'params': {
  1041. 'skip_download': True,
  1042. },
  1043. },
  1044. ]
  1045. def __init__(self, *args, **kwargs):
  1046. super(YoutubeIE, self).__init__(*args, **kwargs)
  1047. self._player_cache = {}
    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download the video info webpage for video_id."""
        self.to_screen('%s: Downloading video info webpage' % video_id)
    def report_information_extraction(self, video_id):
        """Report attempt to extract video information for video_id."""
        self.to_screen('%s: Extracting video information' % video_id)
    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available for video_id."""
        # NOTE: 'format' shadows the builtin of the same name; the parameter
        # name is kept since callers may pass it as a keyword argument.
        self.to_screen('%s: Format %s not available' % (video_id, format))
    def report_rtmp_download(self):
        """Indicate that the download will use the RTMP protocol."""
        self.to_screen('RTMP download detected')
  1060. def _signature_cache_id(self, example_sig):
  1061. """ Return a string representation of a signature """
  1062. return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
  1063. @classmethod
  1064. def _extract_player_info(cls, player_url):
  1065. for player_re in cls._PLAYER_INFO_RE:
  1066. id_m = re.search(player_re, player_url)
  1067. if id_m:
  1068. break
  1069. else:
  1070. raise ExtractorError('Cannot identify player %r' % player_url)
  1071. return id_m.group('ext'), id_m.group('id')
    def _extract_signature_function(self, video_id, player_url, example_sig):
        """Download the player (JS or SWF) behind player_url and extract its
        signature-scrambling function.

        The extracted function is cached on disk keyed by player type/id and
        the "shape" of example_sig (lengths of its dot-separated parts), so
        later runs can rebuild it without re-downloading the player.
        Returns a callable mapping a scrambled signature to a working one.
        """
        player_type, player_id = self._extract_player_info(player_url)

        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        # func_id becomes part of a cache file name, so it must not contain
        # path separators.
        assert os.path.basename(func_id) == func_id

        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            # Cached spec is a list of character indices: the signature
            # function is a pure reordering/selection of input characters.
            return lambda s: ''.join(s[i] for i in cache_spec)

        download_note = (
            'Downloading player %s' % player_url
            if self._downloader.params.get('verbose') else
            'Downloading %s player %s' % (player_type, player_id)
        )
        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type

        # Derive the index spec by running the function on a probe string of
        # distinct characters, then persist it for subsequent runs.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = res(test_string)
        cache_spec = [ord(c) for c in cache_res]
        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the extracted signature function
        (the --youtube-print-sig-code helper for embedding in youtube-dl)."""
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                # Render a run of indices as a compact slice expression.
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            # Walk consecutive index pairs, coalescing runs with step +/-1
            # into slices and emitting isolated indices individually.
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    # Currently inside a run: extend it or close the slice.
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # Two adjacent indices start a new run.
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # Flush the final element/run. NOTE(review): uses the loop
            # variable 'i' after the loop, so idxs is assumed to have at
            # least two entries (true for real signatures).
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
    def _parse_sig_js(self, jscode):
        """Locate the signature-deciphering function in the player JS and
        compile it with JSInterpreter; return a callable sig -> sig.

        The patterns are tried in order, newest player layouts first.
        """
        funcname = self._search_regex(
            (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             # Obsolete patterns
             r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
             r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
            jscode, 'Initial JS player signature function name', group='sig')

        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        # The JS function takes the signature as its single argument.
        return lambda s: initial_function([s])
  1161. def _parse_sig_swf(self, file_contents):
  1162. swfi = SWFInterpreter(file_contents)
  1163. TARGET_CLASSNAME = 'SignatureDecipher'
  1164. searched_class = swfi.extract_class(TARGET_CLASSNAME)
  1165. initial_function = swfi.extract_function(searched_class, 'decipher')
  1166. return lambda s: initial_function([s])
  1167. def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
  1168. """Turn the encrypted s field into a working signature"""
  1169. if player_url is None:
  1170. raise ExtractorError('Cannot decrypt signature without player_url')
  1171. if player_url.startswith('//'):
  1172. player_url = 'https:' + player_url
  1173. elif not re.match(r'https?://', player_url):
  1174. player_url = compat_urlparse.urljoin(
  1175. 'https://www.youtube.com', player_url)
  1176. try:
  1177. player_id = (player_url, self._signature_cache_id(s))
  1178. if player_id not in self._player_cache:
  1179. func = self._extract_signature_function(
  1180. video_id, player_url, s
  1181. )
  1182. self._player_cache[player_id] = func
  1183. func = self._player_cache[player_id]
  1184. if self._downloader.params.get('youtube_print_sig_code'):
  1185. self._print_sig_code(func, s)
  1186. return func(s)
  1187. except Exception as e:
  1188. tb = traceback.format_exc()
  1189. raise ExtractorError(
  1190. 'Signature extraction failed: ' + tb, cause=e)
    def _get_subtitles(self, video_id, webpage, has_live_chat_replay):
        """Fetch manually-created subtitle tracks via the legacy timedtext
        list API.

        Returns a dict {language code: [format dicts]}, possibly including a
        synthetic 'live_chat' entry, or {} when nothing is available.
        """
        try:
            subs_doc = self._download_xml(
                'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            # Best effort: missing subtitles only warn, never abort.
            self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
            return {}

        sub_lang_list = {}
        for track in subs_doc.findall('track'):
            lang = track.attrib['lang_code']
            if lang in sub_lang_list:
                # Keep only the first track listed for each language.
                continue
            sub_formats = []
            for ext in self._SUBTITLE_FORMATS:
                params = compat_urllib_parse_urlencode({
                    'lang': lang,
                    'v': video_id,
                    'fmt': ext,
                    'name': track.attrib['name'].encode('utf-8'),
                })
                sub_formats.append({
                    'url': 'https://www.youtube.com/api/timedtext?' + params,
                    'ext': ext,
                })
            sub_lang_list[lang] = sub_formats
        if has_live_chat_replay:
            # Pseudo-subtitle entry consumed by the youtube_live_chat_replay
            # download protocol rather than fetched from a URL.
            sub_lang_list['live_chat'] = [
                {
                    'video_id': video_id,
                    'ext': 'json',
                    'protocol': 'youtube_live_chat_replay',
                },
            ]
        if not sub_lang_list:
            self._downloader.report_warning('video doesn\'t have subtitles')
            return {}
        return sub_lang_list
  1229. def _get_ytplayer_config(self, video_id, webpage):
  1230. patterns = (
  1231. # User data may contain arbitrary character sequences that may affect
  1232. # JSON extraction with regex, e.g. when '};' is contained the second
  1233. # regex won't capture the whole JSON. Yet working around by trying more
  1234. # concrete regex first keeping in mind proper quoted string handling
  1235. # to be implemented in future that will replace this workaround (see
  1236. # https://github.com/ytdl-org/youtube-dl/issues/7468,
  1237. # https://github.com/ytdl-org/youtube-dl/pull/7599)
  1238. r';ytplayer\.config\s*=\s*({.+?});ytplayer',
  1239. r';ytplayer\.config\s*=\s*({.+?});',
  1240. r'ytInitialPlayerResponse\s*=\s*({.+?});var meta' # Needed???
  1241. )
  1242. config = self._search_regex(
  1243. patterns, webpage, 'ytplayer.config', default=None)
  1244. if config:
  1245. return self._parse_json(
  1246. uppercase_escape(config), video_id, fatal=False)
  1247. def _get_music_metadata_from_yt_initial(self, yt_initial):
  1248. music_metadata = []
  1249. key_map = {
  1250. 'Album': 'album',
  1251. 'Artist': 'artist',
  1252. 'Song': 'track'
  1253. }
  1254. contents = try_get(yt_initial, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'])
  1255. if type(contents) is list:
  1256. for content in contents:
  1257. music_track = {}
  1258. if type(content) is not dict:
  1259. continue
  1260. videoSecondaryInfoRenderer = try_get(content, lambda x: x['videoSecondaryInfoRenderer'])
  1261. if type(videoSecondaryInfoRenderer) is not dict:
  1262. continue
  1263. rows = try_get(videoSecondaryInfoRenderer, lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'])
  1264. if type(rows) is not list:
  1265. continue
  1266. for row in rows:
  1267. metadataRowRenderer = try_get(row, lambda x: x['metadataRowRenderer'])
  1268. if type(metadataRowRenderer) is not dict:
  1269. continue
  1270. key = try_get(metadataRowRenderer, lambda x: x['title']['simpleText'])
  1271. value = try_get(metadataRowRenderer, lambda x: x['contents'][0]['simpleText']) or \
  1272. try_get(metadataRowRenderer, lambda x: x['contents'][0]['runs'][0]['text'])
  1273. if type(key) is not str or type(value) is not str:
  1274. continue
  1275. if key in key_map:
  1276. if key_map[key] in music_track:
  1277. # we've started on a new track
  1278. music_metadata.append(music_track)
  1279. music_track = {}
  1280. music_track[key_map[key]] = value
  1281. if len(music_track.keys()):
  1282. music_metadata.append(music_track)
  1283. return music_metadata
    def _get_automatic_captions(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
        argument to speed up the process.

        Tries three historical caption layouts in order (ttsurl, the
        player_response JSON, and caption_tracks) and returns a dict
        {language code: [format dicts]}, or {} when nothing is found.
        """
        self.to_screen('%s: Looking for automatic captions' % video_id)
        player_config = self._get_ytplayer_config(video_id, webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if not player_config:
            self._downloader.report_warning(err_msg)
            return {}
        try:
            args = player_config['args']
            caption_url = args.get('ttsurl')
            if caption_url:
                # Oldest flow: the player config carries a ttsurl plus a
                # timestamp; list available target languages, then build
                # per-language/per-format URLs against the same endpoint.
                timestamp = args['timestamp']
                # We get the available subtitles
                list_params = compat_urllib_parse_urlencode({
                    'type': 'list',
                    'tlangs': 1,
                    'asrs': 1,
                })
                list_url = caption_url + '&' + list_params
                caption_list = self._download_xml(list_url, video_id)
                original_lang_node = caption_list.find('track')
                if original_lang_node is None:
                    self._downloader.report_warning('Video doesn\'t have automatic captions')
                    return {}
                original_lang = original_lang_node.attrib['lang_code']
                caption_kind = original_lang_node.attrib.get('kind', '')
                sub_lang_list = {}
                for lang_node in caption_list.findall('target'):
                    sub_lang = lang_node.attrib['lang_code']
                    sub_formats = []
                    for ext in self._SUBTITLE_FORMATS:
                        params = compat_urllib_parse_urlencode({
                            'lang': original_lang,
                            'tlang': sub_lang,
                            'fmt': ext,
                            'ts': timestamp,
                            'kind': caption_kind,
                        })
                        sub_formats.append({
                            'url': caption_url + '&' + params,
                            'ext': ext,
                        })
                    sub_lang_list[sub_lang] = sub_formats
                return sub_lang_list

            def make_captions(sub_url, sub_langs):
                # Build {lang: [format dicts]} by rewriting the query string
                # of a known caption URL for each language/format pair.
                parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
                caption_qs = compat_parse_qs(parsed_sub_url.query)
                captions = {}
                for sub_lang in sub_langs:
                    sub_formats = []
                    for ext in self._SUBTITLE_FORMATS:
                        caption_qs.update({
                            'tlang': [sub_lang],
                            'fmt': [ext],
                        })
                        sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
                            query=compat_urllib_parse_urlencode(caption_qs, True)))
                        sub_formats.append({
                            'url': sub_url,
                            'ext': ext,
                        })
                    captions[sub_lang] = sub_formats
                return captions

            # New captions format as of 22.06.2017
            player_response = args.get('player_response')
            if player_response and isinstance(player_response, compat_str):
                player_response = self._parse_json(
                    player_response, video_id, fatal=False)
                if player_response:
                    renderer = player_response['captions']['playerCaptionsTracklistRenderer']
                    base_url = renderer['captionTracks'][0]['baseUrl']
                    sub_lang_list = []
                    for lang in renderer['translationLanguages']:
                        lang_code = lang.get('languageCode')
                        if lang_code:
                            sub_lang_list.append(lang_code)
                    return make_captions(base_url, sub_lang_list)

            # Some videos don't provide ttsurl but rather caption_tracks and
            # caption_translation_languages (e.g. 20LmZk1hakA)
            # Does not used anymore as of 22.06.2017
            caption_tracks = args['caption_tracks']
            caption_translation_languages = args['caption_translation_languages']
            caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
            sub_lang_list = []
            for lang in caption_translation_languages.split(','):
                lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
                sub_lang = lang_qs.get('lc', [None])[0]
                if sub_lang:
                    sub_lang_list.append(sub_lang)
            return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raise by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, IndexError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
  1381. def _mark_watched(self, video_id, video_info, player_response):
  1382. playback_url = url_or_none(try_get(
  1383. player_response,
  1384. lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
  1385. video_info, lambda x: x['videostats_playback_base_url'][0]))
  1386. if not playback_url:
  1387. return
  1388. parsed_playback_url = compat_urlparse.urlparse(playback_url)
  1389. qs = compat_urlparse.parse_qs(parsed_playback_url.query)
  1390. # cpn generation algorithm is reverse engineered from base.js.
  1391. # In fact it works even with dummy cpn.
  1392. CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
  1393. cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
  1394. qs.update({
  1395. 'ver': ['2'],
  1396. 'cpn': [cpn],
  1397. })
  1398. playback_url = compat_urlparse.urlunparse(
  1399. parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
  1400. self._download_webpage(
  1401. playback_url, video_id, 'Marking watched',
  1402. 'Unable to mark watched', fatal=False)
  1403. @staticmethod
  1404. def _extract_urls(webpage):
  1405. # Embedded YouTube player
  1406. entries = [
  1407. unescapeHTML(mobj.group('url'))
  1408. for mobj in re.finditer(r'''(?x)
  1409. (?:
  1410. <iframe[^>]+?src=|
  1411. data-video-url=|
  1412. <embed[^>]+?src=|
  1413. embedSWF\(?:\s*|
  1414. <object[^>]+data=|
  1415. new\s+SWFObject\(
  1416. )
  1417. (["\'])
  1418. (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
  1419. (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
  1420. \1''', webpage)]
  1421. # lazyYT YouTube embed
  1422. entries.extend(list(map(
  1423. unescapeHTML,
  1424. re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
  1425. # Wordpress "YouTube Video Importer" plugin
  1426. matches = re.findall(r'''(?x)<div[^>]+
  1427. class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
  1428. data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
  1429. entries.extend(m[-1] for m in matches)
  1430. return entries
  1431. @staticmethod
  1432. def _extract_url(webpage):
  1433. urls = YoutubeIE._extract_urls(webpage)
  1434. return urls[0] if urls else None
  1435. @classmethod
  1436. def extract_id(cls, url):
  1437. mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
  1438. if mobj is None:
  1439. raise ExtractorError('Invalid URL: %s' % url)
  1440. video_id = mobj.group(2)
  1441. return video_id
  1442. def _extract_chapters_from_json(self, webpage, video_id, duration):
  1443. if not webpage:
  1444. return
  1445. data = self._extract_yt_initial_data(video_id, webpage)
  1446. if not data or not isinstance(data, dict):
  1447. return
  1448. chapters_list = try_get(
  1449. data,
  1450. lambda x: x['playerOverlays']
  1451. ['playerOverlayRenderer']
  1452. ['decoratedPlayerBarRenderer']
  1453. ['decoratedPlayerBarRenderer']
  1454. ['playerBar']
  1455. ['chapteredPlayerBarRenderer']
  1456. ['chapters'],
  1457. list)
  1458. if not chapters_list:
  1459. return
  1460. def chapter_time(chapter):
  1461. return float_or_none(
  1462. try_get(
  1463. chapter,
  1464. lambda x: x['chapterRenderer']['timeRangeStartMillis'],
  1465. int),
  1466. scale=1000)
  1467. chapters = []
  1468. for next_num, chapter in enumerate(chapters_list, start=1):
  1469. start_time = chapter_time(chapter)
  1470. if start_time is None:
  1471. continue
  1472. end_time = (chapter_time(chapters_list[next_num])
  1473. if next_num < len(chapters_list) else duration)
  1474. if end_time is None:
  1475. continue
  1476. title = try_get(
  1477. chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
  1478. compat_str)
  1479. chapters.append({
  1480. 'start_time': start_time,
  1481. 'end_time': end_time,
  1482. 'title': title,
  1483. })
  1484. return chapters
  1485. @staticmethod
  1486. def _extract_chapters_from_description(description, duration):
  1487. if not description:
  1488. return None
  1489. chapter_lines = re.findall(
  1490. r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
  1491. description)
  1492. if not chapter_lines:
  1493. return None
  1494. chapters = []
  1495. for next_num, (chapter_line, time_point) in enumerate(
  1496. chapter_lines, start=1):
  1497. start_time = parse_duration(time_point)
  1498. if start_time is None:
  1499. continue
  1500. if start_time > duration:
  1501. break
  1502. end_time = (duration if next_num == len(chapter_lines)
  1503. else parse_duration(chapter_lines[next_num][1]))
  1504. if end_time is None:
  1505. continue
  1506. if end_time > duration:
  1507. end_time = duration
  1508. if start_time > end_time:
  1509. break
  1510. chapter_title = re.sub(
  1511. r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
  1512. chapter_title = re.sub(r'\s+', ' ', chapter_title)
  1513. chapters.append({
  1514. 'start_time': start_time,
  1515. 'end_time': end_time,
  1516. 'title': chapter_title,
  1517. })
  1518. return chapters
  1519. def _extract_chapters(self, webpage, description, video_id, duration):
  1520. return (self._extract_chapters_from_json(webpage, video_id, duration)
  1521. or self._extract_chapters_from_description(description, duration))
  1522. def _real_extract(self, url):
  1523. url, smuggled_data = unsmuggle_url(url, {})
  1524. proto = (
  1525. 'http' if self._downloader.params.get('prefer_insecure', False)
  1526. else 'https')
  1527. start_time = None
  1528. end_time = None
  1529. parsed_url = compat_urllib_parse_urlparse(url)
  1530. for component in [parsed_url.fragment, parsed_url.query]:
  1531. query = compat_parse_qs(component)
  1532. if start_time is None and 't' in query:
  1533. start_time = parse_duration(query['t'][0])
  1534. if start_time is None and 'start' in query:
  1535. start_time = parse_duration(query['start'][0])
  1536. if end_time is None and 'end' in query:
  1537. end_time = parse_duration(query['end'][0])
  1538. # Extract original video URL from URL with redirection, like age verification, using next_url parameter
  1539. mobj = re.search(self._NEXT_URL_RE, url)
  1540. if mobj:
  1541. url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
  1542. video_id = self.extract_id(url)
  1543. # Get video webpage
  1544. url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
  1545. video_webpage, urlh = self._download_webpage_handle(url, video_id)
  1546. qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
  1547. video_id = qs.get('v', [None])[0] or video_id
  1548. # Attempt to extract SWF player URL
  1549. mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
  1550. if mobj is not None:
  1551. player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
  1552. else:
  1553. player_url = None
  1554. dash_mpds = []
  1555. def add_dash_mpd(video_info):
  1556. dash_mpd = video_info.get('dashmpd')
  1557. if dash_mpd and dash_mpd[0] not in dash_mpds:
  1558. dash_mpds.append(dash_mpd[0])
  1559. def add_dash_mpd_pr(pl_response):
  1560. dash_mpd = url_or_none(try_get(
  1561. pl_response, lambda x: x['streamingData']['dashManifestUrl'],
  1562. compat_str))
  1563. if dash_mpd and dash_mpd not in dash_mpds:
  1564. dash_mpds.append(dash_mpd)
  1565. is_live = None
  1566. view_count = None
  1567. def extract_view_count(v_info):
  1568. return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
  1569. def extract_player_response(player_response, video_id):
  1570. pl_response = str_or_none(player_response)
  1571. if not pl_response:
  1572. return
  1573. pl_response = self._parse_json(pl_response, video_id, fatal=False)
  1574. if isinstance(pl_response, dict):
  1575. add_dash_mpd_pr(pl_response)
  1576. return pl_response
  1577. def extract_embedded_config(embed_webpage, video_id):
  1578. embedded_config = self._search_regex(
  1579. r'setConfig\(({.*})\);',
  1580. embed_webpage, 'ytInitialData', default=None)
  1581. if embedded_config:
  1582. return embedded_config
  1583. player_response = {}
  1584. # Get video info
  1585. video_info = {}
  1586. embed_webpage = None
  1587. if (self._og_search_property('restrictions:age', video_webpage, default=None) == '18+'
  1588. or re.search(r'player-age-gate-content">', video_webpage) is not None):
  1589. cookie_keys = self._get_cookies('https://www.youtube.com').keys()
  1590. age_gate = True
  1591. # We simulate the access to the video from www.youtube.com/v/{video_id}
  1592. # this can be viewed without login into Youtube
  1593. url = proto + '://www.youtube.com/embed/%s' % video_id
  1594. embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
  1595. ext = extract_embedded_config(embed_webpage, video_id)
  1596. # playabilityStatus = re.search(r'{\\\"status\\\":\\\"(?P<playabilityStatus>[^\"]+)\\\"', ext)
  1597. playable_in_embed = re.search(r'{\\\"playableInEmbed\\\":(?P<playableinEmbed>[^\,]+)', ext)
  1598. if not playable_in_embed:
  1599. self.to_screen('Could not determine whether playabale in embed for video %s' % video_id)
  1600. playable_in_embed = ''
  1601. else:
  1602. playable_in_embed = playable_in_embed.group('playableinEmbed')
  1603. # check if video is only playable on youtube in other words not playable in embed - if so it requires auth (cookies)
  1604. # if re.search(r'player-unavailable">', embed_webpage) is not None:
  1605. if playable_in_embed == 'false':
  1606. '''
  1607. # TODO apply this patch when Support for Python 2.6(!) and above drops
  1608. if ({'VISITOR_INFO1_LIVE', 'HSID', 'SSID', 'SID'} <= cookie_keys
  1609. or {'VISITOR_INFO1_LIVE', '__Secure-3PSID', 'LOGIN_INFO'} <= cookie_keys):
  1610. '''
  1611. if (set(('VISITOR_INFO1_LIVE', 'HSID', 'SSID', 'SID')) <= set(cookie_keys)
  1612. or set(('VISITOR_INFO1_LIVE', '__Secure-3PSID', 'LOGIN_INFO')) <= set(cookie_keys)):
  1613. age_gate = False
  1614. # Try looking directly into the video webpage
  1615. ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
  1616. if ytplayer_config:
  1617. args = ytplayer_config.get("args")
  1618. if args is not None:
  1619. if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
  1620. # Convert to the same format returned by compat_parse_qs
  1621. video_info = dict((k, [v]) for k, v in args.items())
  1622. add_dash_mpd(video_info)
  1623. # Rental video is not rented but preview is available (e.g.
  1624. # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
  1625. # https://github.com/ytdl-org/youtube-dl/issues/10532)
  1626. if not video_info and args.get('ypc_vid'):
  1627. return self.url_result(
  1628. args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
  1629. if args.get('livestream') == '1' or args.get('live_playback') == 1:
  1630. is_live = True
  1631. if not player_response:
  1632. player_response = extract_player_response(args.get('player_response'), video_id)
  1633. elif not player_response:
  1634. player_response = ytplayer_config
  1635. if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
  1636. add_dash_mpd_pr(player_response)
  1637. else:
  1638. raise ExtractorError('Video is age restricted and only playable on Youtube. Requires cookies!', expected=True)
  1639. else:
  1640. data = compat_urllib_parse_urlencode({
  1641. 'video_id': video_id,
  1642. 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
  1643. 'sts': self._search_regex(
  1644. r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
  1645. })
  1646. video_info_url = proto + '://www.youtube.com/get_video_info?' + data
  1647. try:
  1648. video_info_webpage = self._download_webpage(
  1649. video_info_url, video_id,
  1650. note='Refetching age-gated info webpage',
  1651. errnote='unable to download video info webpage')
  1652. except ExtractorError:
  1653. video_info_webpage = None
  1654. if video_info_webpage:
  1655. video_info = compat_parse_qs(video_info_webpage)
  1656. pl_response = video_info.get('player_response', [None])[0]
  1657. player_response = extract_player_response(pl_response, video_id)
  1658. add_dash_mpd(video_info)
  1659. view_count = extract_view_count(video_info)
  1660. else:
  1661. age_gate = False
  1662. # Try looking directly into the video webpage
  1663. ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
  1664. if ytplayer_config:
  1665. args = ytplayer_config.get('args', {})
  1666. if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
  1667. # Convert to the same format returned by compat_parse_qs
  1668. video_info = dict((k, [v]) for k, v in args.items())
  1669. add_dash_mpd(video_info)
  1670. # Rental video is not rented but preview is available (e.g.
  1671. # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
  1672. # https://github.com/ytdl-org/youtube-dl/issues/10532)
  1673. if not video_info and args.get('ypc_vid'):
  1674. return self.url_result(
  1675. args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
  1676. if args.get('livestream') == '1' or args.get('live_playback') == 1:
  1677. is_live = True
  1678. if not player_response:
  1679. player_response = extract_player_response(args.get('player_response'), video_id)
  1680. if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
  1681. add_dash_mpd_pr(player_response)
  1682. if not video_info and not player_response:
  1683. player_response = extract_player_response(
  1684. self._search_regex(
  1685. r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;', video_webpage,
  1686. 'initial player response', default='{}'),
  1687. video_id)
  1688. def extract_unavailable_message():
  1689. messages = []
  1690. for tag, kind in (('h1', 'message'), ('div', 'submessage')):
  1691. msg = self._html_search_regex(
  1692. r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
  1693. video_webpage, 'unavailable %s' % kind, default=None)
  1694. if msg:
  1695. messages.append(msg)
  1696. if messages:
  1697. return '\n'.join(messages)
  1698. if not video_info and not player_response:
  1699. unavailable_message = extract_unavailable_message()
  1700. if not unavailable_message:
  1701. unavailable_message = 'Unable to extract video data'
  1702. raise ExtractorError(
  1703. 'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
  1704. if not isinstance(video_info, dict):
  1705. video_info = {}
  1706. video_details = try_get(
  1707. player_response, lambda x: x['videoDetails'], dict) or {}
  1708. microformat = try_get(
  1709. player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
  1710. video_title = video_info.get('title', [None])[0] or video_details.get('title')
  1711. if not video_title:
  1712. self._downloader.report_warning('Unable to extract video title')
  1713. video_title = '_'
  1714. description_original = video_description = get_element_by_id("eow-description", video_webpage)
  1715. if video_description:
  1716. def replace_url(m):
  1717. redir_url = compat_urlparse.urljoin(url, m.group(1))
  1718. parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
  1719. if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
  1720. qs = compat_parse_qs(parsed_redir_url.query)
  1721. q = qs.get('q')
  1722. if q and q[0]:
  1723. return q[0]
  1724. return redir_url
  1725. description_original = video_description = re.sub(r'''(?x)
  1726. <a\s+
  1727. (?:[a-zA-Z-]+="[^"]*"\s+)*?
  1728. (?:title|href)="([^"]+)"\s+
  1729. (?:[a-zA-Z-]+="[^"]*"\s+)*?
  1730. class="[^"]*"[^>]*>
  1731. [^<]+\.{3}\s*
  1732. </a>
  1733. ''', replace_url, video_description)
  1734. video_description = clean_html(video_description)
  1735. else:
  1736. video_description = video_details.get('shortDescription')
  1737. if video_description is None:
  1738. video_description = self._html_search_meta('description', video_webpage)
  1739. if not smuggled_data.get('force_singlefeed', False):
  1740. if not self._downloader.params.get('noplaylist'):
  1741. multifeed_metadata_list = try_get(
  1742. player_response,
  1743. lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
  1744. compat_str) or try_get(
  1745. video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
  1746. if multifeed_metadata_list:
  1747. entries = []
  1748. feed_ids = []
  1749. for feed in multifeed_metadata_list.split(','):
  1750. # Unquote should take place before split on comma (,) since textual
  1751. # fields may contain comma as well (see
  1752. # https://github.com/ytdl-org/youtube-dl/issues/8536)
  1753. feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
  1754. def feed_entry(name):
  1755. return try_get(feed_data, lambda x: x[name][0], compat_str)
  1756. feed_id = feed_entry('id')
  1757. if not feed_id:
  1758. continue
  1759. feed_title = feed_entry('title')
  1760. title = video_title
  1761. if feed_title:
  1762. title += ' (%s)' % feed_title
  1763. entries.append({
  1764. '_type': 'url_transparent',
  1765. 'ie_key': 'Youtube',
  1766. 'url': smuggle_url(
  1767. '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
  1768. {'force_singlefeed': True}),
  1769. 'title': title,
  1770. })
  1771. feed_ids.append(feed_id)
  1772. self.to_screen(
  1773. 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
  1774. % (', '.join(feed_ids), video_id))
  1775. return self.playlist_result(entries, video_id, video_title, video_description)
  1776. else:
  1777. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  1778. if view_count is None:
  1779. view_count = extract_view_count(video_info)
  1780. if view_count is None and video_details:
  1781. view_count = int_or_none(video_details.get('viewCount'))
  1782. if view_count is None and microformat:
  1783. view_count = int_or_none(microformat.get('viewCount'))
  1784. if is_live is None:
  1785. is_live = bool_or_none(video_details.get('isLive'))
  1786. has_live_chat_replay = False
  1787. if not is_live:
  1788. yt_initial_data = self._get_yt_initial_data(video_id, video_webpage)
  1789. try:
  1790. yt_initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
  1791. has_live_chat_replay = True
  1792. except (KeyError, IndexError, TypeError):
  1793. pass
  1794. # Check for "rental" videos
  1795. if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
  1796. raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
  1797. def _extract_filesize(media_url):
  1798. return int_or_none(self._search_regex(
  1799. r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
  1800. streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
  1801. streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
  1802. if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
  1803. self.report_rtmp_download()
  1804. formats = [{
  1805. 'format_id': '_rtmp',
  1806. 'protocol': 'rtmp',
  1807. 'url': video_info['conn'][0],
  1808. 'player_url': player_url,
  1809. }]
  1810. elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
  1811. encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
  1812. if 'rtmpe%3Dyes' in encoded_url_map:
  1813. raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
  1814. formats = []
  1815. formats_spec = {}
  1816. fmt_list = video_info.get('fmt_list', [''])[0]
  1817. if fmt_list:
  1818. for fmt in fmt_list.split(','):
  1819. spec = fmt.split('/')
  1820. if len(spec) > 1:
  1821. width_height = spec[1].split('x')
  1822. if len(width_height) == 2:
  1823. formats_spec[spec[0]] = {
  1824. 'resolution': spec[1],
  1825. 'width': int_or_none(width_height[0]),
  1826. 'height': int_or_none(width_height[1]),
  1827. }
  1828. for fmt in streaming_formats:
  1829. itag = str_or_none(fmt.get('itag'))
  1830. if not itag:
  1831. continue
  1832. quality = fmt.get('quality')
  1833. quality_label = fmt.get('qualityLabel') or quality
  1834. formats_spec[itag] = {
  1835. 'asr': int_or_none(fmt.get('audioSampleRate')),
  1836. 'filesize': int_or_none(fmt.get('contentLength')),
  1837. 'format_note': quality_label,
  1838. 'fps': int_or_none(fmt.get('fps')),
  1839. 'height': int_or_none(fmt.get('height')),
  1840. # bitrate for itag 43 is always 2147483647
  1841. 'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
  1842. 'width': int_or_none(fmt.get('width')),
  1843. }
  1844. for fmt in streaming_formats:
  1845. if fmt.get('drmFamilies') or fmt.get('drm_families'):
  1846. continue
  1847. url = url_or_none(fmt.get('url'))
  1848. if not url:
  1849. cipher = fmt.get('cipher') or fmt.get('signatureCipher')
  1850. if not cipher:
  1851. continue
  1852. url_data = compat_parse_qs(cipher)
  1853. url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
  1854. if not url:
  1855. continue
  1856. else:
  1857. cipher = None
  1858. url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
  1859. stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
  1860. # Unsupported FORMAT_STREAM_TYPE_OTF
  1861. if stream_type == 3:
  1862. continue
  1863. format_id = fmt.get('itag') or url_data['itag'][0]
  1864. if not format_id:
  1865. continue
  1866. format_id = compat_str(format_id)
  1867. if cipher:
  1868. if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
  1869. ASSETS_RE = (
  1870. r'<script[^>]+\bsrc=("[^"]+")[^>]+\bname=["\']player_ias/base',
  1871. r'"jsUrl"\s*:\s*("[^"]+")',
  1872. r'"assets":.+?"js":\s*("[^"]+")')
  1873. jsplayer_url_json = self._search_regex(
  1874. ASSETS_RE,
  1875. embed_webpage if age_gate else video_webpage,
  1876. 'JS player URL (1)', default=None)
  1877. if not jsplayer_url_json and not age_gate:
  1878. # We need the embed website after all
  1879. if embed_webpage is None:
  1880. embed_url = proto + '://www.youtube.com/embed/%s' % video_id
  1881. embed_webpage = self._download_webpage(
  1882. embed_url, video_id, 'Downloading embed webpage')
  1883. jsplayer_url_json = self._search_regex(
  1884. ASSETS_RE, embed_webpage, 'JS player URL')
  1885. player_url = json.loads(jsplayer_url_json)
  1886. if player_url is None:
  1887. player_url_json = self._search_regex(
  1888. r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
  1889. video_webpage, 'age gate player URL')
  1890. player_url = json.loads(player_url_json)
  1891. if 'sig' in url_data:
  1892. url += '&signature=' + url_data['sig'][0]
  1893. elif 's' in url_data:
  1894. encrypted_sig = url_data['s'][0]
  1895. if self._downloader.params.get('verbose'):
  1896. if player_url is None:
  1897. player_desc = 'unknown'
  1898. else:
  1899. player_type, player_version = self._extract_player_info(player_url)
  1900. player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
  1901. parts_sizes = self._signature_cache_id(encrypted_sig)
  1902. self.to_screen('{%s} signature length %s, %s' %
  1903. (format_id, parts_sizes, player_desc))
  1904. signature = self._decrypt_signature(
  1905. encrypted_sig, video_id, player_url, age_gate)
  1906. sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
  1907. url += '&%s=%s' % (sp, signature)
  1908. if 'ratebypass' not in url:
  1909. url += '&ratebypass=yes'
  1910. dct = {
  1911. 'format_id': format_id,
  1912. 'url': url,
  1913. 'player_url': player_url,
  1914. }
  1915. if format_id in self._formats:
  1916. dct.update(self._formats[format_id])
  1917. if format_id in formats_spec:
  1918. dct.update(formats_spec[format_id])
  1919. # Some itags are not included in DASH manifest thus corresponding formats will
  1920. # lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
  1921. # Trying to extract metadata from url_encoded_fmt_stream_map entry.
  1922. mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
  1923. width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
  1924. if width is None:
  1925. width = int_or_none(fmt.get('width'))
  1926. if height is None:
  1927. height = int_or_none(fmt.get('height'))
  1928. filesize = int_or_none(url_data.get(
  1929. 'clen', [None])[0]) or _extract_filesize(url)
  1930. quality = url_data.get('quality', [None])[0] or fmt.get('quality')
  1931. quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
  1932. tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
  1933. or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
  1934. fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
  1935. more_fields = {
  1936. 'filesize': filesize,
  1937. 'tbr': tbr,
  1938. 'width': width,
  1939. 'height': height,
  1940. 'fps': fps,
  1941. 'format_note': quality_label or quality,
  1942. }
  1943. for key, value in more_fields.items():
  1944. if value:
  1945. dct[key] = value
  1946. type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
  1947. if type_:
  1948. type_split = type_.split(';')
  1949. kind_ext = type_split[0].split('/')
  1950. if len(kind_ext) == 2:
  1951. kind, _ = kind_ext
  1952. dct['ext'] = mimetype2ext(type_split[0])
  1953. if kind in ('audio', 'video'):
  1954. codecs = None
  1955. for mobj in re.finditer(
  1956. r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
  1957. if mobj.group('key') == 'codecs':
  1958. codecs = mobj.group('val')
  1959. break
  1960. if codecs:
  1961. dct.update(parse_codecs(codecs))
  1962. if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
  1963. dct['downloader_options'] = {
  1964. # Youtube throttles chunks >~10M
  1965. 'http_chunk_size': 10485760,
  1966. }
  1967. formats.append(dct)
  1968. else:
  1969. manifest_url = (
  1970. url_or_none(try_get(
  1971. player_response,
  1972. lambda x: x['streamingData']['hlsManifestUrl'],
  1973. compat_str))
  1974. or url_or_none(try_get(
  1975. video_info, lambda x: x['hlsvp'][0], compat_str)))
  1976. if manifest_url:
  1977. formats = []
  1978. m3u8_formats = self._extract_m3u8_formats(
  1979. manifest_url, video_id, 'mp4', fatal=False)
  1980. for a_format in m3u8_formats:
  1981. itag = self._search_regex(
  1982. r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
  1983. if itag:
  1984. a_format['format_id'] = itag
  1985. if itag in self._formats:
  1986. dct = self._formats[itag].copy()
  1987. dct.update(a_format)
  1988. a_format = dct
  1989. a_format['player_url'] = player_url
  1990. # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
  1991. a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
  1992. if self._downloader.params.get('youtube_include_hls_manifest', True):
  1993. formats.append(a_format)
  1994. else:
  1995. error_message = extract_unavailable_message()
  1996. if not error_message:
  1997. reason_list = try_get(
  1998. player_response,
  1999. lambda x: x['playabilityStatus']['errorScreen']['playerErrorMessageRenderer']['subreason']['runs'],
  2000. list) or []
  2001. for reason in reason_list:
  2002. if not isinstance(reason, dict):
  2003. continue
  2004. reason_text = try_get(reason, lambda x: x['text'], compat_str)
  2005. if reason_text:
  2006. if not error_message:
  2007. error_message = ''
  2008. error_message += reason_text
  2009. if error_message:
  2010. error_message = clean_html(error_message)
  2011. if not error_message:
  2012. error_message = clean_html(try_get(
  2013. player_response, lambda x: x['playabilityStatus']['reason'],
  2014. compat_str))
  2015. if not error_message:
  2016. error_message = clean_html(
  2017. try_get(video_info, lambda x: x['reason'][0], compat_str))
  2018. if error_message:
  2019. raise ExtractorError(error_message, expected=True)
  2020. raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
  2021. # uploader
  2022. video_uploader = try_get(
  2023. video_info, lambda x: x['author'][0],
  2024. compat_str) or str_or_none(video_details.get('author'))
  2025. if video_uploader:
  2026. video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
  2027. else:
  2028. self._downloader.report_warning('unable to extract uploader name')
  2029. # uploader_id
  2030. video_uploader_id = None
  2031. video_uploader_url = None
  2032. mobj = re.search(
  2033. r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
  2034. video_webpage)
  2035. if mobj is not None:
  2036. video_uploader_id = mobj.group('uploader_id')
  2037. video_uploader_url = mobj.group('uploader_url')
  2038. else:
  2039. owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
  2040. if owner_profile_url:
  2041. video_uploader_id = self._search_regex(
  2042. r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
  2043. default=None)
  2044. video_uploader_url = owner_profile_url
  2045. channel_id = (
  2046. str_or_none(video_details.get('channelId'))
  2047. or self._html_search_meta(
  2048. 'channelId', video_webpage, 'channel id', default=None)
  2049. or self._search_regex(
  2050. r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
  2051. video_webpage, 'channel id', default=None, group='id'))
  2052. channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
  2053. thumbnails = []
  2054. thumbnails_list = try_get(
  2055. video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
  2056. for t in thumbnails_list:
  2057. if not isinstance(t, dict):
  2058. continue
  2059. thumbnail_url = url_or_none(t.get('url'))
  2060. if not thumbnail_url:
  2061. continue
  2062. thumbnails.append({
  2063. 'url': thumbnail_url,
  2064. 'width': int_or_none(t.get('width')),
  2065. 'height': int_or_none(t.get('height')),
  2066. })
  2067. if not thumbnails:
  2068. video_thumbnail = None
  2069. # We try first to get a high quality image:
  2070. m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
  2071. video_webpage, re.DOTALL)
  2072. if m_thumb is not None:
  2073. video_thumbnail = m_thumb.group(1)
  2074. thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
  2075. if thumbnail_url:
  2076. video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
  2077. if video_thumbnail:
  2078. thumbnails.append({'url': video_thumbnail})
  2079. # upload date
  2080. upload_date = self._html_search_meta(
  2081. 'datePublished', video_webpage, 'upload date', default=None)
  2082. if not upload_date:
  2083. upload_date = self._search_regex(
  2084. [r'(?s)id="eow-date.*?>(.*?)</span>',
  2085. r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
  2086. video_webpage, 'upload date', default=None)
  2087. if not upload_date:
  2088. upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
  2089. upload_date = unified_strdate(upload_date)
  2090. video_license = self._html_search_regex(
  2091. r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
  2092. video_webpage, 'license', default=None)
  2093. m_music = re.search(
  2094. r'''(?x)
  2095. <h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
  2096. <ul[^>]*>\s*
  2097. <li>(?P<title>.+?)
  2098. by (?P<creator>.+?)
  2099. (?:
  2100. \(.+?\)|
  2101. <a[^>]*
  2102. (?:
  2103. \bhref=["\']/red[^>]*>| # drop possible
  2104. >\s*Listen ad-free with YouTube Red # YouTube Red ad
  2105. )
  2106. .*?
  2107. )?</li
  2108. ''',
  2109. video_webpage)
  2110. if m_music:
  2111. video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
  2112. video_creator = clean_html(m_music.group('creator'))
  2113. else:
  2114. video_alt_title = video_creator = None
  2115. def extract_meta(field):
  2116. return self._html_search_regex(
  2117. r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
  2118. video_webpage, field, default=None)
  2119. track = extract_meta('Song')
  2120. artist = extract_meta('Artist')
  2121. album = extract_meta('Album')
  2122. # Youtube Music Auto-generated description
  2123. release_date = release_year = None
  2124. if video_description:
  2125. mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
  2126. if mobj:
  2127. if not track:
  2128. track = mobj.group('track').strip()
  2129. if not artist:
  2130. artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
  2131. if not album:
  2132. album = mobj.group('album'.strip())
  2133. release_year = mobj.group('release_year')
  2134. release_date = mobj.group('release_date')
  2135. if release_date:
  2136. release_date = release_date.replace('-', '')
  2137. if not release_year:
  2138. release_year = int(release_date[:4])
  2139. if release_year:
  2140. release_year = int(release_year)
  2141. yt_initial = self._get_yt_initial_data(video_id, video_webpage)
  2142. if yt_initial:
  2143. music_metadata = self._get_music_metadata_from_yt_initial(yt_initial)
  2144. if len(music_metadata):
  2145. album = music_metadata[0].get('album')
  2146. artist = music_metadata[0].get('artist')
  2147. track = music_metadata[0].get('track')
  2148. m_episode = re.search(
  2149. r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
  2150. video_webpage)
  2151. if m_episode:
  2152. series = unescapeHTML(m_episode.group('series'))
  2153. season_number = int(m_episode.group('season'))
  2154. episode_number = int(m_episode.group('episode'))
  2155. else:
  2156. series = season_number = episode_number = None
  2157. m_cat_container = self._search_regex(
  2158. r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
  2159. video_webpage, 'categories', default=None)
  2160. category = None
  2161. if m_cat_container:
  2162. category = self._html_search_regex(
  2163. r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
  2164. default=None)
  2165. if not category:
  2166. category = try_get(
  2167. microformat, lambda x: x['category'], compat_str)
  2168. video_categories = None if category is None else [category]
  2169. video_tags = [
  2170. unescapeHTML(m.group('content'))
  2171. for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
  2172. if not video_tags:
  2173. video_tags = try_get(video_details, lambda x: x['keywords'], list)
  2174. def _extract_count(count_name):
  2175. return str_to_int(self._search_regex(
  2176. (r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>' % re.escape(count_name),
  2177. r'["\']label["\']\s*:\s*["\']([\d,.]+)\s+%ss["\']' % re.escape(count_name)),
  2178. video_webpage, count_name, default=None))
  2179. like_count = _extract_count('like')
  2180. dislike_count = _extract_count('dislike')
  2181. if view_count is None:
  2182. view_count = str_to_int(self._search_regex(
  2183. r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
  2184. 'view count', default=None))
  2185. average_rating = (
  2186. float_or_none(video_details.get('averageRating'))
  2187. or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
  2188. # subtitles
  2189. video_subtitles = self.extract_subtitles(
  2190. video_id, video_webpage, has_live_chat_replay)
  2191. automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
  2192. video_duration = try_get(
  2193. video_info, lambda x: int_or_none(x['length_seconds'][0]))
  2194. if not video_duration:
  2195. video_duration = int_or_none(video_details.get('lengthSeconds'))
  2196. if not video_duration:
  2197. video_duration = parse_duration(self._html_search_meta(
  2198. 'duration', video_webpage, 'video duration'))
  2199. # Get Subscriber Count of channel
  2200. subscriber_count = parse_count(self._search_regex(
  2201. r'"text":"([\d\.]+\w?) subscribers"',
  2202. video_webpage,
  2203. 'subscriber count',
  2204. default=None
  2205. ))
  2206. # annotations
  2207. video_annotations = None
  2208. if self._downloader.params.get('writeannotations', False):
  2209. xsrf_token = self._search_regex(
  2210. r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
  2211. video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
  2212. invideo_url = try_get(
  2213. player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
  2214. if xsrf_token and invideo_url:
  2215. xsrf_field_name = self._search_regex(
  2216. r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
  2217. video_webpage, 'xsrf field name',
  2218. group='xsrf_field_name', default='session_token')
  2219. video_annotations = self._download_webpage(
  2220. self._proto_relative_url(invideo_url),
  2221. video_id, note='Downloading annotations',
  2222. errnote='Unable to download video annotations', fatal=False,
  2223. data=urlencode_postdata({xsrf_field_name: xsrf_token}))
  2224. chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
  2225. # Look for the DASH manifest
  2226. if self._downloader.params.get('youtube_include_dash_manifest', True):
  2227. dash_mpd_fatal = True
  2228. for mpd_url in dash_mpds:
  2229. dash_formats = {}
  2230. try:
  2231. def decrypt_sig(mobj):
  2232. s = mobj.group(1)
  2233. dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
  2234. return '/signature/%s' % dec_s
  2235. mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
  2236. for df in self._extract_mpd_formats(
  2237. mpd_url, video_id, fatal=dash_mpd_fatal,
  2238. formats_dict=self._formats):
  2239. if not df.get('filesize'):
  2240. df['filesize'] = _extract_filesize(df['url'])
  2241. # Do not overwrite DASH format found in some previous DASH manifest
  2242. if df['format_id'] not in dash_formats:
  2243. dash_formats[df['format_id']] = df
  2244. # Additional DASH manifests may end up in HTTP Error 403 therefore
  2245. # allow them to fail without bug report message if we already have
  2246. # some DASH manifest succeeded. This is temporary workaround to reduce
  2247. # burst of bug reports until we figure out the reason and whether it
  2248. # can be fixed at all.
  2249. dash_mpd_fatal = False
  2250. except (ExtractorError, KeyError) as e:
  2251. self.report_warning(
  2252. 'Skipping DASH manifest: %r' % e, video_id)
  2253. if dash_formats:
  2254. # Remove the formats we found through non-DASH, they
  2255. # contain less info and it can be wrong, because we use
  2256. # fixed values (for example the resolution). See
  2257. # https://github.com/ytdl-org/youtube-dl/issues/5774 for an
  2258. # example.
  2259. formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
  2260. formats.extend(dash_formats.values())
  2261. # Check for malformed aspect ratio
  2262. stretched_m = re.search(
  2263. r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
  2264. video_webpage)
  2265. if stretched_m:
  2266. w = float(stretched_m.group('w'))
  2267. h = float(stretched_m.group('h'))
  2268. # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
  2269. # We will only process correct ratios.
  2270. if w > 0 and h > 0:
  2271. ratio = w / h
  2272. for f in formats:
  2273. if f.get('vcodec') != 'none':
  2274. f['stretched_ratio'] = ratio
  2275. if not formats:
  2276. if 'reason' in video_info:
  2277. if 'The uploader has not made this video available in your country.' in video_info['reason']:
  2278. regions_allowed = self._html_search_meta(
  2279. 'regionsAllowed', video_webpage, default=None)
  2280. countries = regions_allowed.split(',') if regions_allowed else None
  2281. self.raise_geo_restricted(
  2282. msg=video_info['reason'][0], countries=countries)
  2283. reason = video_info['reason'][0]
  2284. if 'Invalid parameters' in reason:
  2285. unavailable_message = extract_unavailable_message()
  2286. if unavailable_message:
  2287. reason = unavailable_message
  2288. raise ExtractorError(
  2289. 'YouTube said: %s' % reason,
  2290. expected=True, video_id=video_id)
  2291. if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']):
  2292. raise ExtractorError('This video is DRM protected.', expected=True)
  2293. self._sort_formats(formats)
  2294. self.mark_watched(video_id, video_info, player_response)
  2295. return {
  2296. 'id': video_id,
  2297. 'uploader': video_uploader,
  2298. 'uploader_id': video_uploader_id,
  2299. 'uploader_url': video_uploader_url,
  2300. 'channel_id': channel_id,
  2301. 'channel_url': channel_url,
  2302. 'upload_date': upload_date,
  2303. 'license': video_license,
  2304. 'creator': video_creator or artist,
  2305. 'title': video_title,
  2306. 'alt_title': video_alt_title or track,
  2307. 'thumbnails': thumbnails,
  2308. 'description': video_description,
  2309. 'categories': video_categories,
  2310. 'tags': video_tags,
  2311. 'subtitles': video_subtitles,
  2312. 'automatic_captions': automatic_captions,
  2313. 'duration': video_duration,
  2314. 'age_limit': 18 if age_gate else 0,
  2315. 'annotations': video_annotations,
  2316. 'chapters': chapters,
  2317. 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
  2318. 'view_count': view_count,
  2319. 'like_count': like_count,
  2320. 'dislike_count': dislike_count,
  2321. 'average_rating': average_rating,
  2322. 'formats': formats,
  2323. 'is_live': is_live,
  2324. 'start_time': start_time,
  2325. 'end_time': end_time,
  2326. 'series': series,
  2327. 'season_number': season_number,
  2328. 'episode_number': episode_number,
  2329. 'track': track,
  2330. 'artist': artist,
  2331. 'album': album,
  2332. 'release_date': release_date,
  2333. 'release_year': release_year,
  2334. 'subscriber_count': subscriber_count,
  2335. }
class YoutubeTabIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com tab'
    # Matches channel/user/custom-name pages and playlist/watch?list= URLs on
    # youtube.com, youtubekids.com and invidio.us; the negative lookahead
    # excludes reserved top-level paths listed in
    # YoutubeBaseInfoExtractor._RESERVED_NAMES (watch, results, feed/..., etc.).
    _VALID_URL = (
        r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/'
        r'(?:(?!(%s)([/#?]|$))|'
        r'(?:channel|c|user)/|'
        r'(?:playlist|watch)\?.*?\blist=)'
        r'(?P<id>[^/?#&]+)') % YoutubeBaseInfoExtractor._RESERVED_NAMES
    IE_NAME = 'youtube:tab'
    # Integration-test fixtures consumed by youtube-dl's test runner.
    # Each dict pairs a URL with the metadata expected from extraction;
    # entries with 'only_matching' only validate _VALID_URL coverage.
    _TESTS = [{
        # playlists, multipage
        'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
        'playlist_mincount': 94,
        'info_dict': {
            'id': 'UCqj7Cz7revf5maW9g5pgNcg',
            'title': 'Игорь Клейнер - Playlists',
            'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
        },
    }, {
        # playlists, multipage, different order
        'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
        'playlist_mincount': 94,
        'info_dict': {
            'id': 'UCqj7Cz7revf5maW9g5pgNcg',
            'title': 'Игорь Клейнер - Playlists',
            'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
        },
    }, {
        # playlists, singlepage
        'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
        'playlist_mincount': 4,
        'info_dict': {
            'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
            'title': 'ThirstForScience - Playlists',
            'description': 'md5:609399d937ea957b0f53cbffb747a14c',
        }
    }, {
        'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
        'only_matching': True,
    }, {
        # basic, single video playlist
        'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
        'info_dict': {
            'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
            'uploader': 'Sergey M.',
            'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
            'title': 'youtube-dl public playlist',
        },
        'playlist_count': 1,
    }, {
        # empty playlist
        'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
        'info_dict': {
            'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
            'uploader': 'Sergey M.',
            'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
            'title': 'youtube-dl empty playlist',
        },
        'playlist_count': 0,
    }, {
        # Home tab
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
        'info_dict': {
            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'lex will - Home',
            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
        },
        'playlist_mincount': 2,
    }, {
        # Videos tab
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
        'info_dict': {
            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'lex will - Videos',
            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
        },
        'playlist_mincount': 975,
    }, {
        # Videos tab, sorted by popular
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
        'info_dict': {
            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'lex will - Videos',
            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
        },
        'playlist_mincount': 199,
    }, {
        # Playlists tab
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
        'info_dict': {
            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'lex will - Playlists',
            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
        },
        'playlist_mincount': 17,
    }, {
        # Community tab
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
        'info_dict': {
            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'lex will - Community',
            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
        },
        'playlist_mincount': 18,
    }, {
        # Channels tab
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
        'info_dict': {
            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'lex will - Channels',
            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
        },
        'playlist_mincount': 138,
    }, {
        'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
        'only_matching': True,
    }, {
        'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
        'only_matching': True,
    }, {
        'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
        'only_matching': True,
    }, {
        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
        'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
        'info_dict': {
            'title': '29C3: Not my department',
            'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
            'uploader': 'Christiaan008',
            'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
        },
        'playlist_count': 96,
    }, {
        'note': 'Large playlist',
        'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
        'info_dict': {
            'title': 'Uploads from Cauchemar',
            'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
            'uploader': 'Cauchemar',
            'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
        },
        'playlist_mincount': 1123,
    }, {
        # even larger playlist, 8832 videos
        'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
        'only_matching': True,
    }, {
        'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
        'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
        'info_dict': {
            'title': 'Uploads from Interstellar Movie',
            'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
            'uploader': 'Interstellar Movie',
            'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
        },
        'playlist_mincount': 21,
    }, {
        # https://github.com/ytdl-org/youtube-dl/issues/21844
        'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
        'info_dict': {
            'title': 'Data Analysis with Dr Mike Pound',
            'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
            'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
            'uploader': 'Computerphile',
        },
        'playlist_mincount': 11,
    }, {
        'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
        'only_matching': True,
    }, {
        # Playlist URL that does not actually serve a playlist
        'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
        'info_dict': {
            'id': 'FqZTN594JQw',
            'ext': 'webm',
            'title': "Smiley's People 01 detective, Adventure Series, Action",
            'uploader': 'STREEM',
            'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
            'upload_date': '20150526',
            'license': 'Standard YouTube License',
            'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
            'categories': ['People & Blogs'],
            'tags': list,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'This video is not available.',
        'add_ie': [YoutubeIE.ie_key()],
    }, {
        'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
        'info_dict': {
            'id': '9Auq9mYxFEE',
            'ext': 'mp4',
            'title': 'Watch Sky News live',
            'uploader': 'Sky News',
            'uploader_id': 'skynews',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
            'upload_date': '20191102',
            'description': 'md5:78de4e1c2359d0ea3ed829678e38b662',
            'categories': ['News & Politics'],
            'tags': list,
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.youtube.com/user/TheYoungTurks/live',
        'info_dict': {
            'id': 'a48o2S1cPoo',
            'ext': 'mp4',
            'title': 'The Young Turks - Live Main Show',
            'uploader': 'The Young Turks',
            'uploader_id': 'TheYoungTurks',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
            'upload_date': '20150715',
            'license': 'Standard YouTube License',
            'description': 'md5:438179573adcdff3c97ebb1ee632b891',
            'categories': ['News & Politics'],
            'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'skip_download': True,
        },
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
        'only_matching': True,
    },
        # TODO
        # {
        #     'url': 'https://www.youtube.com/TheYoungTurks/live',
        #     'only_matching': True,
        # }
    ]
  2588. def _extract_channel_id(self, webpage):
  2589. channel_id = self._html_search_meta(
  2590. 'channelId', webpage, 'channel id', default=None)
  2591. if channel_id:
  2592. return channel_id
  2593. channel_url = self._html_search_meta(
  2594. ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
  2595. 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
  2596. 'twitter:app:url:googleplay'), webpage, 'channel url')
  2597. return self._search_regex(
  2598. r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
  2599. channel_url, 'channel id')
  2600. @staticmethod
  2601. def _extract_grid_item_renderer(item):
  2602. for item_kind in ('Playlist', 'Video', 'Channel'):
  2603. renderer = item.get('grid%sRenderer' % item_kind)
  2604. if renderer:
  2605. return renderer
  2606. def _extract_video(self, renderer):
  2607. video_id = renderer.get('videoId')
  2608. title = try_get(
  2609. renderer,
  2610. (lambda x: x['title']['runs'][0]['text'],
  2611. lambda x: x['title']['simpleText']), compat_str)
  2612. description = try_get(
  2613. renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
  2614. compat_str)
  2615. duration = parse_duration(try_get(
  2616. renderer, lambda x: x['lengthText']['simpleText'], compat_str))
  2617. view_count_text = try_get(
  2618. renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
  2619. view_count = str_to_int(self._search_regex(
  2620. r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
  2621. 'view count', default=None))
  2622. uploader = try_get(
  2623. renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
  2624. return {
  2625. '_type': 'url_transparent',
  2626. 'ie_key': YoutubeIE.ie_key(),
  2627. 'id': video_id,
  2628. 'url': video_id,
  2629. 'title': title,
  2630. 'description': description,
  2631. 'duration': duration,
  2632. 'view_count': view_count,
  2633. 'uploader': uploader,
  2634. }
  2635. def _grid_entries(self, grid_renderer):
  2636. for item in grid_renderer['items']:
  2637. if not isinstance(item, dict):
  2638. continue
  2639. renderer = self._extract_grid_item_renderer(item)
  2640. if not isinstance(renderer, dict):
  2641. continue
  2642. title = try_get(
  2643. renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  2644. # playlist
  2645. playlist_id = renderer.get('playlistId')
  2646. if playlist_id:
  2647. yield self.url_result(
  2648. 'https://www.youtube.com/playlist?list=%s' % playlist_id,
  2649. ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
  2650. video_title=title)
  2651. # video
  2652. video_id = renderer.get('videoId')
  2653. if video_id:
  2654. yield self._extract_video(renderer)
  2655. # channel
  2656. channel_id = renderer.get('channelId')
  2657. if channel_id:
  2658. title = try_get(
  2659. renderer, lambda x: x['title']['simpleText'], compat_str)
  2660. yield self.url_result(
  2661. 'https://www.youtube.com/channel/%s' % channel_id,
  2662. ie=YoutubeTabIE.ie_key(), video_title=title)
  2663. def _shelf_entries_trimmed(self, shelf_renderer):
  2664. renderer = try_get(
  2665. shelf_renderer, lambda x: x['content']['horizontalListRenderer'], dict)
  2666. if not renderer:
  2667. return
  2668. # TODO: add support for nested playlists so each shelf is processed
  2669. # as separate playlist
  2670. # TODO: this includes only first N items
  2671. for entry in self._grid_entries(renderer):
  2672. yield entry
  2673. def _shelf_entries(self, shelf_renderer):
  2674. ep = try_get(
  2675. shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
  2676. compat_str)
  2677. shelf_url = urljoin('https://www.youtube.com', ep)
  2678. if not shelf_url:
  2679. return
  2680. title = try_get(
  2681. shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  2682. yield self.url_result(shelf_url, video_title=title)
  2683. def _playlist_entries(self, video_list_renderer):
  2684. for content in video_list_renderer['contents']:
  2685. if not isinstance(content, dict):
  2686. continue
  2687. renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
  2688. if not isinstance(renderer, dict):
  2689. continue
  2690. video_id = renderer.get('videoId')
  2691. if not video_id:
  2692. continue
  2693. yield self._extract_video(renderer)
  2694. def _itemSection_entries(self, item_sect_renderer):
  2695. for content in item_sect_renderer['contents']:
  2696. if not isinstance(content, dict):
  2697. continue
  2698. renderer = content.get('videoRenderer', {})
  2699. if not isinstance(renderer, dict):
  2700. continue
  2701. video_id = renderer.get('videoId')
  2702. if not video_id:
  2703. continue
  2704. yield self._extract_video(renderer)
  2705. def _rich_entries(self, rich_grid_renderer):
  2706. renderer = try_get(
  2707. rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict)
  2708. video_id = renderer.get('videoId')
  2709. if not video_id:
  2710. return
  2711. yield self._extract_video(renderer)
  2712. def _video_entry(self, video_renderer):
  2713. video_id = video_renderer.get('videoId')
  2714. if video_id:
  2715. return self._extract_video(video_renderer)
  2716. def _post_thread_entries(self, post_thread_renderer):
  2717. post_renderer = try_get(
  2718. post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
  2719. if not post_renderer:
  2720. return
  2721. # video attachment
  2722. video_renderer = try_get(
  2723. post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
  2724. video_id = None
  2725. if video_renderer:
  2726. entry = self._video_entry(video_renderer)
  2727. if entry:
  2728. yield entry
  2729. # inline video links
  2730. runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
  2731. for run in runs:
  2732. if not isinstance(run, dict):
  2733. continue
  2734. ep_url = try_get(
  2735. run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
  2736. if not ep_url:
  2737. continue
  2738. if not YoutubeIE.suitable(ep_url):
  2739. continue
  2740. ep_video_id = YoutubeIE._match_id(ep_url)
  2741. if video_id == ep_video_id:
  2742. continue
  2743. yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
  2744. def _post_thread_continuation_entries(self, post_thread_continuation):
  2745. contents = post_thread_continuation.get('contents')
  2746. if not isinstance(contents, list):
  2747. return
  2748. for content in contents:
  2749. renderer = content.get('backstagePostThreadRenderer')
  2750. if not isinstance(renderer, dict):
  2751. continue
  2752. for entry in self._post_thread_entries(renderer):
  2753. yield entry
  2754. @staticmethod
  2755. def _extract_next_continuation_data(renderer):
  2756. next_continuation = try_get(
  2757. renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
  2758. if not next_continuation:
  2759. return
  2760. continuation = next_continuation.get('continuation')
  2761. if not continuation:
  2762. return
  2763. ctp = next_continuation.get('clickTrackingParams')
  2764. return {
  2765. 'ctoken': continuation,
  2766. 'continuation': continuation,
  2767. 'itct': ctp,
  2768. }
  2769. @classmethod
  2770. def _extract_continuation(cls, renderer):
  2771. next_continuation = cls._extract_next_continuation_data(renderer)
  2772. if next_continuation:
  2773. return next_continuation
  2774. contents = renderer.get('contents')
  2775. if not isinstance(contents, list):
  2776. return
  2777. for content in contents:
  2778. if not isinstance(content, dict):
  2779. continue
  2780. continuation_ep = try_get(
  2781. content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
  2782. dict)
  2783. if not continuation_ep:
  2784. continue
  2785. continuation = try_get(
  2786. continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
  2787. if not continuation:
  2788. continue
  2789. ctp = continuation_ep.get('clickTrackingParams')
  2790. if not ctp:
  2791. continue
  2792. return {
  2793. 'ctoken': continuation,
  2794. 'continuation': continuation,
  2795. 'itct': ctp,
  2796. }
    def _entries(self, tab, identity_token):
        """Yield all entries of a tab, following browse_ajax continuations.

        tab: the selected tab's 'content' dict (sectionListRenderer or
        richGridRenderer). identity_token: optional account token sent as
        the x-youtube-identity-token header.
        """
        def extract_entries(parent_renderer):
            # Walks one sectionListRenderer-shaped payload; as a side effect
            # stores the next continuation (if any) in continuation_list[0].
            slr_contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
            for slr_content in slr_contents:
                if not isinstance(slr_content, dict):
                    continue
                is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
                if not is_renderer:
                    # rich grid items (e.g. channel Videos tab) appear directly
                    # at this level instead of inside an itemSectionRenderer
                    renderer = slr_content.get('richItemRenderer')
                    if renderer:
                        for entry in self._rich_entries(renderer):
                            yield entry
                        continuation_list[0] = self._extract_continuation(parent_renderer)
                    continue
                isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
                for isr_content in isr_contents:
                    if not isinstance(isr_content, dict):
                        continue
                    # dispatch on whichever renderer type this item carries
                    renderer = isr_content.get('playlistVideoListRenderer')
                    if renderer:
                        for entry in self._playlist_entries(renderer):
                            yield entry
                        continuation_list[0] = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('gridRenderer')
                    if renderer:
                        for entry in self._grid_entries(renderer):
                            yield entry
                        continuation_list[0] = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('shelfRenderer')
                    if renderer:
                        for entry in self._shelf_entries(renderer):
                            yield entry
                        # shelves do not carry their own continuation; use the
                        # parent renderer's
                        continuation_list[0] = self._extract_continuation(parent_renderer)
                        continue
                    renderer = isr_content.get('backstagePostThreadRenderer')
                    if renderer:
                        for entry in self._post_thread_entries(renderer):
                            yield entry
                        continuation_list[0] = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('videoRenderer')
                    if renderer:
                        entry = self._video_entry(renderer)
                        if entry:
                            yield entry
                if not continuation_list[0]:
                    continuation_list[0] = self._extract_continuation(is_renderer)
            if not continuation_list[0]:
                continuation_list[0] = self._extract_continuation(parent_renderer)
        # single-element list used as a writable closure cell, because
        # Python 2 does not support the nonlocal statement
        continuation_list = [None]
        parent_renderer = (
            try_get(tab, lambda x: x['sectionListRenderer'], dict)
            or try_get(tab, lambda x: x['richGridRenderer'], dict) or {})
        if parent_renderer:
            for entry in extract_entries(parent_renderer):
                yield entry
        continuation = continuation_list[0]
        headers = {
            'x-youtube-client-name': '1',
            'x-youtube-client-version': '2.20201112.04.01',
        }
        if identity_token:
            headers['x-youtube-identity-token'] = identity_token
        # follow continuations page by page until none is returned
        for page_num in itertools.count(1):
            if not continuation:
                break
            # optional per-subclass page cap - TODO confirm which subclasses
            # define _MAX_PAGES (not set on this class itself)
            if hasattr(self, '_MAX_PAGES') and page_num > self._MAX_PAGES:
                break
            browse = self._download_json(
                'https://www.youtube.com/browse_ajax', None,
                'Downloading page %d' % page_num,
                headers=headers, query=continuation, fatal=False)
            if not browse:
                break
            response = try_get(browse, lambda x: x[1]['response'], dict)
            if not response:
                break
            # older continuation response format: continuationContents
            continuation_contents = try_get(
                response, lambda x: x['continuationContents'], dict)
            if continuation_contents:
                continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
                if continuation_renderer:
                    for entry in self._playlist_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('gridContinuation')
                if continuation_renderer:
                    for entry in self._grid_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('itemSectionContinuation')
                if continuation_renderer:
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('sectionListContinuation')
                if continuation_renderer:
                    # reset the closure cell so extract_entries can report the
                    # next continuation for this page
                    continuation_list = [None]
                    for entry in extract_entries(continuation_renderer):
                        yield entry
                    continuation = continuation_list[0]
                    continue
            # newer response format: onResponseReceivedActions
            continuation_items = try_get(
                response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
            if continuation_items:
                continuation_item = continuation_items[0]
                if not isinstance(continuation_item, dict):
                    continue
                renderer = continuation_item.get('playlistVideoRenderer')
                if renderer:
                    # wrap the flat item list so the playlist helpers can
                    # consume it like a regular video list renderer
                    video_list_renderer = {'contents': continuation_items}
                    for entry in self._playlist_entries(video_list_renderer):
                        yield entry
                    continuation = self._extract_continuation(video_list_renderer)
                    continue
                renderer = continuation_item.get('itemSectionRenderer')
                if renderer:
                    for entry in self._itemSection_entries(renderer):
                        yield entry
                    continuation = self._extract_continuation({'contents': continuation_items})
                    continue
            break
  2924. @staticmethod
  2925. def _extract_selected_tab(tabs):
  2926. for tab in tabs:
  2927. if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
  2928. return tab['tabRenderer']
  2929. else:
  2930. raise ExtractorError('Unable to find selected tab')
  2931. @staticmethod
  2932. def _extract_uploader(data):
  2933. uploader = {}
  2934. sidebar_renderer = try_get(
  2935. data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
  2936. if sidebar_renderer:
  2937. for item in sidebar_renderer:
  2938. if not isinstance(item, dict):
  2939. continue
  2940. renderer = item.get('playlistSidebarSecondaryInfoRenderer')
  2941. if not isinstance(renderer, dict):
  2942. continue
  2943. owner = try_get(
  2944. renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
  2945. if owner:
  2946. uploader['uploader'] = owner.get('text')
  2947. uploader['uploader_id'] = try_get(
  2948. owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
  2949. uploader['uploader_url'] = urljoin(
  2950. 'https://www.youtube.com/',
  2951. try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
  2952. return uploader
  2953. def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
  2954. selected_tab = self._extract_selected_tab(tabs)
  2955. renderer = try_get(
  2956. data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
  2957. playlist_id = None
  2958. if renderer:
  2959. channel_title = renderer.get('title') or item_id
  2960. tab_title = selected_tab.get('title')
  2961. title = channel_title or item_id
  2962. if tab_title:
  2963. title += ' - %s' % tab_title
  2964. description = renderer.get('description')
  2965. playlist_id = renderer.get('externalId')
  2966. renderer = try_get(
  2967. data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
  2968. if renderer:
  2969. title = renderer.get('title')
  2970. description = None
  2971. playlist_id = item_id
  2972. if playlist_id is None:
  2973. return None
  2974. playlist = self.playlist_result(
  2975. self._entries(selected_tab['content'], identity_token),
  2976. playlist_id=playlist_id, playlist_title=title,
  2977. playlist_description=description)
  2978. playlist.update(self._extract_uploader(data))
  2979. return playlist
  2980. def _extract_from_playlist(self, item_id, data, playlist):
  2981. title = playlist.get('title') or try_get(
  2982. data, lambda x: x['titleText']['simpleText'], compat_str)
  2983. playlist_id = playlist.get('playlistId') or item_id
  2984. return self.playlist_result(
  2985. self._playlist_entries(playlist), playlist_id=playlist_id,
  2986. playlist_title=title)
  2987. def _real_extract(self, url):
  2988. item_id = self._match_id(url)
  2989. url = compat_urlparse.urlunparse(
  2990. compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
  2991. # Handle both video/playlist URLs
  2992. qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
  2993. video_id = qs.get('v', [None])[0]
  2994. playlist_id = qs.get('list', [None])[0]
  2995. if video_id and playlist_id:
  2996. if self._downloader.params.get('noplaylist'):
  2997. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  2998. return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
  2999. self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
  3000. webpage = self._download_webpage(url, item_id)
  3001. identity_token = self._search_regex(
  3002. r'\bID_TOKEN["\']\s*:\s/l*["\'](.+?)["\']', webpage,
  3003. 'identity token', default=None)
  3004. data = self._extract_yt_initial_data(item_id, webpage)
  3005. tabs = try_get(
  3006. data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
  3007. if tabs:
  3008. return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
  3009. playlist = try_get(
  3010. data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
  3011. if playlist:
  3012. return self._extract_from_playlist(item_id, data, playlist)
  3013. # Fallback to video extraction if no playlist alike page is recognized.
  3014. # First check for the current video then try the v attribute of URL query.
  3015. video_id = try_get(
  3016. data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
  3017. compat_str) or video_id
  3018. if video_id:
  3019. return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
  3020. # Failed to recognize
  3021. raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
    """Thin shim for playlist URLs/ids; real work is delegated to YoutubeTabIE."""
    IE_DESC = 'YouTube.com playlists'
    # Also accepts a bare playlist id (scheme/host/list= are all optional).
    _VALID_URL = r'''(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        (?:
                            (?:
                                youtube(?:kids)?\.com|
                                invidio\.us|
                                youtu\.be
                            )
                            /.*?\?.*?\blist=
                        )?
                        (?P<id>%(playlist_id)s)
                     )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': '[OLD]Team Fortress 2 (Class-based LP)',
            'id': 'PLBB231211A4F62143',
            'uploader': 'Wickydoo',
            'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
        },
        'playlist_mincount': 29,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
            'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        },
        'playlist_count': 2,
        'skip': 'This playlist is private',
    }, {
        'note': 'embedded',
        'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA15',
            'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
            'uploader': 'milan',
            'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
        }
    }, {
        'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
        'playlist_mincount': 982,
        'info_dict': {
            'title': '2018 Chinese New Singles (11/6 updated)',
            'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
            'uploader': 'LBK',
            'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
        }
    }, {
        'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
        'info_dict': {
            'id': 'yeWKywCrFtk',
            'ext': 'mp4',
            'title': 'Small Scale Baler and Braiding Rugs',
            'uploader': 'Backus-Page House Museum',
            'uploader_id': 'backuspagemuseum',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
            'upload_date': '20161008',
            'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
            'categories': ['Nonprofits & Activism'],
            'tags': list,
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
    }, {
        'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
        'only_matching': True,
    }, {
        'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
        'only_matching': True,
    }, {
        # music album playlist
        'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Yield to YoutubeTabIE for any URL it already handles, so this IE
        # only catches bare ids and shortened forms.
        return False if YoutubeTabIE.suitable(url) else super(
            YoutubePlaylistIE, cls).suitable(url)

    def _real_extract(self, url):
        # Normalize to a canonical /playlist URL and hand off to YoutubeTabIE.
        playlist_id = self._match_id(url)
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if not qs:
            # Bare-id input: synthesize the list= query ourselves.
            qs = {'list': playlist_id}
        return self.url_result(
            update_url_query('https://www.youtube.com/playlist', qs),
            ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
  3118. class YoutubeYtUserIE(InfoExtractor):
  3119. _VALID_URL = r'ytuser:(?P<id>.+)'
  3120. _TESTS = [{
  3121. 'url': 'ytuser:phihag',
  3122. 'only_matching': True,
  3123. }]
  3124. def _real_extract(self, url):
  3125. user_id = self._match_id(url)
  3126. return self.url_result(
  3127. 'https://www.youtube.com/user/%s' % user_id,
  3128. ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
    """'ytsearchN:query' search extractor backed by the innertube search API."""
    IE_DESC = 'YouTube.com searches'
    # there doesn't appear to be a real limit, for example if you search for
    # 'python' you get more than 8.000.000 results
    _MAX_RESULTS = float('inf')
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    # Optional encoded filter sent as 'params' with the API request;
    # overridden by subclasses (e.g. YoutubeSearchDateIE).
    _SEARCH_PARAMS = None
    _TESTS = []

    def _entries(self, query, n):
        """Yield up to n url_transparent result dicts for the given query,
        following API continuations page by page."""
        data = {
            'context': {
                'client': {
                    'clientName': 'WEB',
                    'clientVersion': '2.20201021.03.00',
                }
            },
            'query': query,
        }
        if self._SEARCH_PARAMS:
            data['params'] = self._SEARCH_PARAMS
        total = 0
        for page_num in itertools.count(1):
            search = self._download_json(
                'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
                video_id='query "%s"' % query,
                note='Downloading page %s' % page_num,
                errnote='Unable to download API page', fatal=False,
                data=json.dumps(data).encode('utf8'),
                headers={'content-type': 'application/json'})
            if not search:
                break
            # First page: section list under the search results renderer;
            # continuation pages: appended continuation items instead.
            slr_contents = try_get(
                search,
                (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
                 lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
                list)
            if not slr_contents:
                break
            isr_contents = try_get(
                slr_contents,
                lambda x: x[0]['itemSectionRenderer']['contents'],
                list)
            if not isr_contents:
                break
            for content in isr_contents:
                if not isinstance(content, dict):
                    continue
                # Only plain video results are surfaced; shelves, channels
                # and other renderer types are skipped.
                video = content.get('videoRenderer')
                if not isinstance(video, dict):
                    continue
                video_id = video.get('videoId')
                if not video_id:
                    continue
                title = try_get(video, lambda x: x['title']['runs'][0]['text'], compat_str)
                description = try_get(video, lambda x: x['descriptionSnippet']['runs'][0]['text'], compat_str)
                duration = parse_duration(try_get(video, lambda x: x['lengthText']['simpleText'], compat_str))
                view_count_text = try_get(video, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
                # Strip whitespace (thousands separators in some locales),
                # then take the leading digit run of e.g. '1,234 views'.
                view_count = int_or_none(self._search_regex(
                    r'^(\d+)', re.sub(r'\s', '', view_count_text),
                    'view count', default=None))
                uploader = try_get(video, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
                total += 1
                yield {
                    '_type': 'url_transparent',
                    'ie_key': YoutubeIE.ie_key(),
                    'id': video_id,
                    'url': video_id,
                    'title': title,
                    'description': description,
                    'duration': duration,
                    'view_count': view_count,
                    'uploader': uploader,
                }
                if total == n:
                    return
            # The second slot carries the continuation token for the next page.
            token = try_get(
                slr_contents,
                lambda x: x[1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
                compat_str)
            if not token:
                break
            data['continuation'] = token

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        return self.playlist_result(self._entries(query, n), query)
class YoutubeSearchDateIE(YoutubeSearchIE):
    """Same as YoutubeSearchIE but sorted by upload date (newest first)."""
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    # Encoded search filter selecting "sort by upload date" in the API.
    _SEARCH_PARAMS = 'CAI%3D'
  3220. class YoutubeSearchURLIE(InfoExtractor):
  3221. IE_DESC = 'YouTube.com search URLs'
  3222. IE_NAME = 'youtube:search_url'
  3223. _VALID_URL = r'https?://(?:www\.)?youtube\.com/results/?(?:\?|\?[^#]*?&)(?:sp=(?P<param1>[^&#]+)&(?:[^#]*&)?)?(?:q|search_query)=(?P<query>[^#&]+)(?:[^#]*?&sp=(?P<param2>[^#&]+))?'
  3224. # _MAX_RESULTS = 100
  3225. _TESTS = [{
  3226. 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
  3227. 'playlist_mincount': 5,
  3228. 'info_dict': {
  3229. 'title': 'youtube-dl test video',
  3230. }
  3231. }, {
  3232. 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
  3233. 'only_matching': True,
  3234. }]
  3235. def _real_extract(self, url):
  3236. mobj = re.match(self._VALID_URL, url)
  3237. query = compat_urllib_parse_unquote_plus(mobj.group('query'))
  3238. IE = YoutubeSearchIE(self._downloader)
  3239. IE._SEARCH_PARAMS = mobj.group('param1') or mobj.group('param2')
  3240. if hasattr(self, '_MAX_RESULTS'):
  3241. IE._MAX_RESULTS = self._MAX_RESULTS
  3242. return IE._get_n_results(query, IE._MAX_RESULTS)
  3243. class YoutubeFeedsInfoExtractor(YoutubeTabIE):
  3244. """
  3245. Base class for feed extractors
  3246. Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
  3247. """
  3248. _LOGIN_REQUIRED = True
  3249. # _MAX_PAGES = 5
  3250. _TESTS = []
  3251. @property
  3252. def IE_NAME(self):
  3253. return 'youtube:%s' % self._FEED_NAME
  3254. def _real_initialize(self):
  3255. self._login()
  3256. def _shelf_entries(self, shelf_renderer):
  3257. renderer = try_get(shelf_renderer, lambda x: x['content']['gridRenderer'], dict)
  3258. if not renderer:
  3259. return
  3260. for entry in self._grid_entries(renderer):
  3261. yield entry
  3262. def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
  3263. selected_tab = self._extract_selected_tab(tabs)
  3264. return self.playlist_result(
  3265. self._entries(selected_tab['content'], identity_token),
  3266. playlist_title=self._PLAYLIST_TITLE)
  3267. def _real_extract(self, url):
  3268. item_id = self._FEED_NAME
  3269. url = 'https://www.youtube.com/feed/%s' % self._FEED_NAME
  3270. webpage = self._download_webpage(url, item_id)
  3271. identity_token = self._search_regex(
  3272. r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
  3273. 'identity token', default=None)
  3274. data = self._extract_yt_initial_data(item_id, webpage)
  3275. tabs = try_get(
  3276. data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
  3277. if tabs:
  3278. return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
  3279. # Failed to recognize
  3280. raise ExtractorError('Unable to recognize feed page')
  3281. class YoutubeWatchLaterIE(InfoExtractor):
  3282. IE_NAME = 'youtube:watchlater'
  3283. IE_DESC = 'Youtube watch later list, ":ytwatchlater" or "WL" for short (requires authentication)'
  3284. _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/watch_later|:ytwatchlater|WL'
  3285. _TESTS = [{
  3286. 'url': 'https://www.youtube.com/feed/watch_later',
  3287. 'only_matching': True,
  3288. }, {
  3289. 'url': ':ytwatchlater',
  3290. 'only_matching': True,
  3291. }]
  3292. def _real_extract(self, url):
  3293. return self.url_result(
  3294. 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
  3295. class YoutubeFavouritesIE(InfoExtractor):
  3296. IE_NAME = 'youtube:favourites'
  3297. IE_DESC = 'YouTube.com liked videos, ":ytfav" or "LL" for short (requires authentication)'
  3298. _VALID_URL = r':ytfav(?:ou?rite)?s?|LL'
  3299. _TESTS = [{
  3300. 'url': ':ytfav',
  3301. 'only_matching': True,
  3302. }]
  3303. def _real_extract(self, url):
  3304. return self.url_result(
  3305. 'https://www.youtube.com/playlist?list=LL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    """Authenticated recommendations feed (https://www.youtube.com/feed/recommended)."""
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com(?:/feed/recommended|/?[?#]|/?$)|:ytrec(?:ommended)?'
    # Feed slug used by the base class to build /feed/<name>.
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    """Authenticated subscriptions feed (https://www.youtube.com/feed/subscriptions)."""
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsub(?:scription)?s?'
    # Feed slug used by the base class to build /feed/<name>.
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    """Authenticated watch-history feed (https://www.youtube.com/feed/history)."""
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
    # Feed slug used by the base class to build /feed/<name>.
    _FEED_NAME = 'history'
    _PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
    """Catch watch/attribution URLs whose v= parameter was cut off (usually by
    an unquoted '&' in the shell) and raise a helpful error."""
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Nothing can be extracted from such a URL; fail with advice instead.
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)
  3366. class YoutubeTruncatedIDIE(InfoExtractor):
  3367. IE_NAME = 'youtube:truncated_id'
  3368. IE_DESC = False # Do not list
  3369. _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
  3370. _TESTS = [{
  3371. 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
  3372. 'only_matching': True,
  3373. }]
  3374. def _real_extract(self, url):
  3375. video_id = self._match_id(url)
  3376. raise ExtractorError(
  3377. 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
  3378. expected=True)
# Do Youtube show urls even exist anymore? I couldn't find any.
# NOTE: the block below is deliberately disabled by wrapping it in a no-op
# raw string literal; it is kept only as a reference implementation.
r'''
class YoutubeShowIE(YoutubeTabIE):
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'https://www.youtube.com/show/airdisasters',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        }
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        return super(YoutubeShowIE, self)._real_extract(
            'https://www.youtube.com/show/%s/playlists' % playlist_id)
'''