You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

3685 lines
164 KiB

  1. # coding: utf-8
  2. from __future__ import unicode_literals
  3. import itertools
  4. import json
  5. import os.path
  6. import random
  7. import re
  8. import time
  9. import traceback
  10. from .common import InfoExtractor, SearchInfoExtractor
  11. from ..jsinterp import JSInterpreter
  12. from ..swfinterp import SWFInterpreter
  13. from ..compat import (
  14. compat_chr,
  15. compat_kwargs,
  16. compat_parse_qs,
  17. compat_urllib_parse_unquote,
  18. compat_urllib_parse_unquote_plus,
  19. compat_urllib_parse_urlencode,
  20. compat_urllib_parse_urlparse,
  21. compat_urlparse,
  22. compat_str,
  23. )
  24. from ..utils import (
  25. bool_or_none,
  26. clean_html,
  27. error_to_compat_str,
  28. ExtractorError,
  29. float_or_none,
  30. get_element_by_id,
  31. int_or_none,
  32. mimetype2ext,
  33. parse_codecs,
  34. parse_count,
  35. parse_duration,
  36. remove_quotes,
  37. remove_start,
  38. smuggle_url,
  39. str_or_none,
  40. str_to_int,
  41. try_get,
  42. unescapeHTML,
  43. unified_strdate,
  44. unsmuggle_url,
  45. update_url_query,
  46. uppercase_escape,
  47. url_or_none,
  48. urlencode_postdata,
  49. urljoin,
  50. )
  51. class YoutubeBaseInfoExtractor(InfoExtractor):
  52. """Provide base functions for Youtube extractors"""
  53. _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
  54. _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
  55. _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
  56. _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
  57. _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
  58. _RESERVED_NAMES = (
  59. r'course|embed|channel|c|user|playlist|watch|w|results|storefront|'
  60. r'shared|index|account|reporthistory|t/terms|about|upload|signin|logout|'
  61. r'feed/(watch_later|history|subscriptions|library|trending|recommended)')
  62. _NETRC_MACHINE = 'youtube'
  63. # If True it will raise an error if no login info is provided
  64. _LOGIN_REQUIRED = False
  65. _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
  66. _YOUTUBE_CLIENT_HEADERS = {
  67. 'x-youtube-client-name': '1',
  68. 'x-youtube-client-version': '1.20200609.04.02',
  69. }
  70. def _set_language(self):
  71. self._set_cookie(
  72. '.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
  73. # YouTube sets the expire time to about two months
  74. expire_time=time.time() + 2 * 30 * 24 * 3600)
  75. def _ids_to_results(self, ids):
  76. return [
  77. self.url_result(vid_id, 'Youtube', video_id=vid_id)
  78. for vid_id in ids]
  79. def _login(self):
  80. """
  81. Attempt to log in to YouTube.
  82. True is returned if successful or skipped.
  83. False is returned if login failed.
  84. If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
  85. """
  86. username, password = self._get_login_info()
  87. # No authentication to be performed
  88. if username is None:
  89. if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
  90. raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
  91. if self._downloader.params.get('cookiefile') and False: # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them.
  92. self.to_screen('[Cookies] Reminder - Make sure to always use up to date cookies!')
  93. return True
  94. login_page = self._download_webpage(
  95. self._LOGIN_URL, None,
  96. note='Downloading login page',
  97. errnote='unable to fetch login page', fatal=False)
  98. if login_page is False:
  99. return
  100. login_form = self._hidden_inputs(login_page)
  101. def req(url, f_req, note, errnote):
  102. data = login_form.copy()
  103. data.update({
  104. 'pstMsg': 1,
  105. 'checkConnection': 'youtube',
  106. 'checkedDomains': 'youtube',
  107. 'hl': 'en',
  108. 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
  109. 'f.req': json.dumps(f_req),
  110. 'flowName': 'GlifWebSignIn',
  111. 'flowEntry': 'ServiceLogin',
  112. # TODO: reverse actual botguard identifier generation algo
  113. 'bgRequest': '["identifier",""]',
  114. })
  115. return self._download_json(
  116. url, None, note=note, errnote=errnote,
  117. transform_source=lambda s: re.sub(r'^[^[]*', '', s),
  118. fatal=False,
  119. data=urlencode_postdata(data), headers={
  120. 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
  121. 'Google-Accounts-XSRF': 1,
  122. })
  123. def warn(message):
  124. self._downloader.report_warning(message)
  125. lookup_req = [
  126. username,
  127. None, [], None, 'US', None, None, 2, False, True,
  128. [
  129. None, None,
  130. [2, 1, None, 1,
  131. 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
  132. None, [], 4],
  133. 1, [None, None, []], None, None, None, True
  134. ],
  135. username,
  136. ]
  137. lookup_results = req(
  138. self._LOOKUP_URL, lookup_req,
  139. 'Looking up account info', 'Unable to look up account info')
  140. if lookup_results is False:
  141. return False
  142. user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
  143. if not user_hash:
  144. warn('Unable to extract user hash')
  145. return False
  146. challenge_req = [
  147. user_hash,
  148. None, 1, None, [1, None, None, None, [password, None, True]],
  149. [
  150. None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
  151. 1, [None, None, []], None, None, None, True
  152. ]]
  153. challenge_results = req(
  154. self._CHALLENGE_URL, challenge_req,
  155. 'Logging in', 'Unable to log in')
  156. if challenge_results is False:
  157. return
  158. login_res = try_get(challenge_results, lambda x: x[0][5], list)
  159. if login_res:
  160. login_msg = try_get(login_res, lambda x: x[5], compat_str)
  161. warn(
  162. 'Unable to login: %s' % 'Invalid password'
  163. if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
  164. return False
  165. res = try_get(challenge_results, lambda x: x[0][-1], list)
  166. if not res:
  167. warn('Unable to extract result entry')
  168. return False
  169. login_challenge = try_get(res, lambda x: x[0][0], list)
  170. if login_challenge:
  171. challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
  172. if challenge_str == 'TWO_STEP_VERIFICATION':
  173. # SEND_SUCCESS - TFA code has been successfully sent to phone
  174. # QUOTA_EXCEEDED - reached the limit of TFA codes
  175. status = try_get(login_challenge, lambda x: x[5], compat_str)
  176. if status == 'QUOTA_EXCEEDED':
  177. warn('Exceeded the limit of TFA codes, try later')
  178. return False
  179. tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
  180. if not tl:
  181. warn('Unable to extract TL')
  182. return False
  183. tfa_code = self._get_tfa_info('2-step verification code')
  184. if not tfa_code:
  185. warn(
  186. 'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
  187. '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
  188. return False
  189. tfa_code = remove_start(tfa_code, 'G-')
  190. tfa_req = [
  191. user_hash, None, 2, None,
  192. [
  193. 9, None, None, None, None, None, None, None,
  194. [None, tfa_code, True, 2]
  195. ]]
  196. tfa_results = req(
  197. self._TFA_URL.format(tl), tfa_req,
  198. 'Submitting TFA code', 'Unable to submit TFA code')
  199. if tfa_results is False:
  200. return False
  201. tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
  202. if tfa_res:
  203. tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
  204. warn(
  205. 'Unable to finish TFA: %s' % 'Invalid TFA code'
  206. if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
  207. return False
  208. check_cookie_url = try_get(
  209. tfa_results, lambda x: x[0][-1][2], compat_str)
  210. else:
  211. CHALLENGES = {
  212. 'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
  213. 'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
  214. 'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
  215. }
  216. challenge = CHALLENGES.get(
  217. challenge_str,
  218. '%s returned error %s.' % (self.IE_NAME, challenge_str))
  219. warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
  220. return False
  221. else:
  222. check_cookie_url = try_get(res, lambda x: x[2], compat_str)
  223. if not check_cookie_url:
  224. warn('Unable to extract CheckCookie URL')
  225. return False
  226. check_cookie_results = self._download_webpage(
  227. check_cookie_url, None, 'Checking cookie', fatal=False)
  228. if check_cookie_results is False:
  229. return False
  230. if 'https://myaccount.google.com/' not in check_cookie_results:
  231. warn('Unable to log in')
  232. return False
  233. return True
  234. def _download_webpage_handle(self, *args, **kwargs):
  235. query = kwargs.get('query', {}).copy()
  236. kwargs['query'] = query
  237. return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
  238. *args, **compat_kwargs(kwargs))
  239. def _get_yt_initial_data(self, video_id, webpage):
  240. config = self._search_regex(
  241. (r'window\["ytInitialData"\]\s*=\s*(.*?)(?<=});',
  242. r'var\s+ytInitialData\s*=\s*(.*?)(?<=});'),
  243. webpage, 'ytInitialData', default=None)
  244. if config:
  245. return self._parse_json(
  246. uppercase_escape(config), video_id, fatal=False)
  247. def _real_initialize(self):
  248. if self._downloader is None:
  249. return
  250. self._set_language()
  251. if not self._login():
  252. return
  253. _DEFAULT_API_DATA = {
  254. 'context': {
  255. 'client': {
  256. 'clientName': 'WEB',
  257. 'clientVersion': '2.20201021.03.00',
  258. }
  259. },
  260. }
  261. _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
  262. def _call_api(self, ep, query, video_id):
  263. data = self._DEFAULT_API_DATA.copy()
  264. data.update(query)
  265. response = self._download_json(
  266. 'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
  267. note='Downloading API JSON', errnote='Unable to download API page',
  268. data=json.dumps(data).encode('utf8'),
  269. headers={'content-type': 'application/json'},
  270. query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
  271. return response
  272. def _extract_yt_initial_data(self, video_id, webpage):
  273. return self._parse_json(
  274. self._search_regex(
  275. (r'%s\s*\n' % self._YT_INITIAL_DATA_RE,
  276. self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
  277. video_id)
  278. class YoutubeIE(YoutubeBaseInfoExtractor):
  279. IE_DESC = 'YouTube.com'
  280. _VALID_URL = r"""(?x)^
  281. (
  282. (?:https?://|//) # http(s):// or protocol-independent URL
  283. (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
  284. (?:www\.)?deturl\.com/www\.youtube\.com/|
  285. (?:www\.)?pwnyoutube\.com/|
  286. (?:www\.)?hooktube\.com/|
  287. (?:www\.)?yourepeat\.com/|
  288. tube\.majestyc\.net/|
  289. # Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
  290. (?:(?:www|dev)\.)?invidio\.us/|
  291. (?:(?:www|no)\.)?invidiou\.sh/|
  292. (?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
  293. (?:www\.)?invidious\.kabi\.tk/|
  294. (?:www\.)?invidious\.13ad\.de/|
  295. (?:www\.)?invidious\.mastodon\.host/|
  296. (?:www\.)?invidious\.nixnet\.xyz/|
  297. (?:www\.)?invidious\.drycat\.fr/|
  298. (?:www\.)?tube\.poal\.co/|
  299. (?:www\.)?vid\.wxzm\.sx/|
  300. (?:www\.)?yewtu\.be/|
  301. (?:www\.)?yt\.elukerio\.org/|
  302. (?:www\.)?yt\.lelux\.fi/|
  303. (?:www\.)?invidious\.ggc-project\.de/|
  304. (?:www\.)?yt\.maisputain\.ovh/|
  305. (?:www\.)?invidious\.13ad\.de/|
  306. (?:www\.)?invidious\.toot\.koeln/|
  307. (?:www\.)?invidious\.fdn\.fr/|
  308. (?:www\.)?watch\.nettohikari\.com/|
  309. (?:www\.)?kgg2m7yk5aybusll\.onion/|
  310. (?:www\.)?qklhadlycap4cnod\.onion/|
  311. (?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
  312. (?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
  313. (?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
  314. (?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
  315. (?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
  316. (?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
  317. youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
  318. (?:.*?\#/)? # handle anchor (#/) redirect urls
  319. (?: # the various things that can precede the ID:
  320. (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
  321. |(?: # or the v= param in all its forms
  322. (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
  323. (?:\?|\#!?) # the params delimiter ? or # or #!
  324. (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
  325. v=
  326. )
  327. ))
  328. |(?:
  329. youtu\.be| # just youtu.be/xxxx
  330. vid\.plus| # or vid.plus/xxxx
  331. zwearz\.com/watch| # or zwearz.com/watch/xxxx
  332. )/
  333. |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
  334. )
  335. )? # all until now is optional -> you can pass the naked ID
  336. (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
  337. (?!.*?\blist=
  338. (?:
  339. %(playlist_id)s| # combined list/video URLs are handled by the playlist IE
  340. WL # WL are handled by the watch later IE
  341. )
  342. )
  343. (?(1).+)? # if we found the ID, everything can follow
  344. $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
  345. _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
  346. _PLAYER_INFO_RE = (
  347. r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
  348. r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
  349. )
  350. _formats = {
  351. '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
  352. '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
  353. '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
  354. '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
  355. '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
  356. '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
  357. '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  358. '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  359. # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
  360. '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
  361. '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
  362. '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
  363. '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
  364. '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
  365. '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
  366. '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
  367. '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  368. '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
  369. # 3D videos
  370. '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
  371. '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
  372. '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
  373. '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
  374. '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
  375. '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
  376. '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
  377. # Apple HTTP Live Streaming
  378. '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
  379. '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
  380. '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
  381. '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
  382. '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
  383. '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
  384. '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
  385. '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
  386. # DASH mp4 video
  387. '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
  388. '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
  389. '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
  390. '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
  391. '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
  392. '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
  393. '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
  394. '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
  395. '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
  396. '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
  397. '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
  398. '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
  399. # Dash mp4 audio
  400. '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
  401. '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
  402. '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
  403. '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
  404. '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
  405. '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
  406. '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
  407. # Dash webm
  408. '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  409. '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  410. '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  411. '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  412. '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  413. '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
  414. '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
  415. '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  416. '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  417. '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  418. '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  419. '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  420. '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  421. '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  422. '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  423. # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
  424. '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  425. '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  426. '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  427. '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  428. '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
  429. '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
  430. # Dash webm audio
  431. '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
  432. '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
  433. # Dash webm audio with opus inside
  434. '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
  435. '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
  436. '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
  437. # RTMP (unnamed)
  438. '_rtmp': {'protocol': 'rtmp'},
  439. # av01 video only formats sometimes served with "unknown" codecs
  440. '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  441. '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  442. '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  443. '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
  444. }
  445. _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
  446. _GEO_BYPASS = False
  447. IE_NAME = 'youtube'
  448. _TESTS = [
  449. {
  450. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
  451. 'info_dict': {
  452. 'id': 'BaW_jenozKc',
  453. 'ext': 'mp4',
  454. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  455. 'uploader': 'Philipp Hagemeister',
  456. 'uploader_id': 'phihag',
  457. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
  458. 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
  459. 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
  460. 'upload_date': '20121002',
  461. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  462. 'categories': ['Science & Technology'],
  463. 'tags': ['youtube-dl'],
  464. 'duration': 10,
  465. 'view_count': int,
  466. 'like_count': int,
  467. 'dislike_count': int,
  468. 'start_time': 1,
  469. 'end_time': 9,
  470. }
  471. },
  472. {
  473. 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
  474. 'note': 'Embed-only video (#1746)',
  475. 'info_dict': {
  476. 'id': 'yZIXLfi8CZQ',
  477. 'ext': 'mp4',
  478. 'upload_date': '20120608',
  479. 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
  480. 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
  481. 'uploader': 'SET India',
  482. 'uploader_id': 'setindia',
  483. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
  484. 'age_limit': 18,
  485. }
  486. },
  487. {
  488. 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
  489. 'note': 'Use the first video ID in the URL',
  490. 'info_dict': {
  491. 'id': 'BaW_jenozKc',
  492. 'ext': 'mp4',
  493. 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
  494. 'uploader': 'Philipp Hagemeister',
  495. 'uploader_id': 'phihag',
  496. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
  497. 'upload_date': '20121002',
  498. 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
  499. 'categories': ['Science & Technology'],
  500. 'tags': ['youtube-dl'],
  501. 'duration': 10,
  502. 'view_count': int,
  503. 'like_count': int,
  504. 'dislike_count': int,
  505. },
  506. 'params': {
  507. 'skip_download': True,
  508. },
  509. },
  510. {
  511. 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
  512. 'note': '256k DASH audio (format 141) via DASH manifest',
  513. 'info_dict': {
  514. 'id': 'a9LDPn-MO4I',
  515. 'ext': 'm4a',
  516. 'upload_date': '20121002',
  517. 'uploader_id': '8KVIDEO',
  518. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
  519. 'description': '',
  520. 'uploader': '8KVIDEO',
  521. 'title': 'UHDTV TEST 8K VIDEO.mp4'
  522. },
  523. 'params': {
  524. 'youtube_include_dash_manifest': True,
  525. 'format': '141',
  526. },
  527. 'skip': 'format 141 not served anymore',
  528. },
  529. # DASH manifest with encrypted signature
  530. {
  531. 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
  532. 'info_dict': {
  533. 'id': 'IB3lcPjvWLA',
  534. 'ext': 'm4a',
  535. 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
  536. 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
  537. 'duration': 244,
  538. 'uploader': 'AfrojackVEVO',
  539. 'uploader_id': 'AfrojackVEVO',
  540. 'upload_date': '20131011',
  541. },
  542. 'params': {
  543. 'youtube_include_dash_manifest': True,
  544. 'format': '141/bestaudio[ext=m4a]',
  545. },
  546. },
  547. # Controversy video
  548. {
  549. 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
  550. 'info_dict': {
  551. 'id': 'T4XJQO3qol8',
  552. 'ext': 'mp4',
  553. 'duration': 219,
  554. 'upload_date': '20100909',
  555. 'uploader': 'Amazing Atheist',
  556. 'uploader_id': 'TheAmazingAtheist',
  557. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
  558. 'title': 'Burning Everyone\'s Koran',
  559. 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
  560. }
  561. },
  562. # Normal age-gate video (embed allowed)
  563. {
  564. 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
  565. 'info_dict': {
  566. 'id': 'HtVdAasjOgU',
  567. 'ext': 'mp4',
  568. 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
  569. 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
  570. 'duration': 142,
  571. 'uploader': 'The Witcher',
  572. 'uploader_id': 'WitcherGame',
  573. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
  574. 'upload_date': '20140605',
  575. 'age_limit': 18,
  576. },
  577. },
  578. # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
  579. # YouTube Red ad is not captured for creator
  580. {
  581. 'url': '__2ABJjxzNo',
  582. 'info_dict': {
  583. 'id': '__2ABJjxzNo',
  584. 'ext': 'mp4',
  585. 'duration': 266,
  586. 'upload_date': '20100430',
  587. 'uploader_id': 'deadmau5',
  588. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
  589. 'creator': 'Dada Life, deadmau5',
  590. 'description': 'md5:12c56784b8032162bb936a5f76d55360',
  591. 'uploader': 'deadmau5',
  592. 'title': 'Deadmau5 - Some Chords (HD)',
  593. 'alt_title': 'This Machine Kills Some Chords',
  594. },
  595. 'expected_warnings': [
  596. 'DASH manifest missing',
  597. ]
  598. },
  599. # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
  600. {
  601. 'url': 'lqQg6PlCWgI',
  602. 'info_dict': {
  603. 'id': 'lqQg6PlCWgI',
  604. 'ext': 'mp4',
  605. 'duration': 6085,
  606. 'upload_date': '20150827',
  607. 'uploader_id': 'olympic',
  608. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
  609. 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
  610. 'uploader': 'Olympic',
  611. 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
  612. },
  613. 'params': {
  614. 'skip_download': 'requires avconv',
  615. }
  616. },
  617. # Non-square pixels
  618. {
  619. 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
  620. 'info_dict': {
  621. 'id': '_b-2C3KPAM0',
  622. 'ext': 'mp4',
  623. 'stretched_ratio': 16 / 9.,
  624. 'duration': 85,
  625. 'upload_date': '20110310',
  626. 'uploader_id': 'AllenMeow',
  627. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
  628. 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
  629. 'uploader': '孫ᄋᄅ',
  630. 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
  631. },
  632. },
  633. # url_encoded_fmt_stream_map is empty string
  634. {
  635. 'url': 'qEJwOuvDf7I',
  636. 'info_dict': {
  637. 'id': 'qEJwOuvDf7I',
  638. 'ext': 'webm',
  639. 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
  640. 'description': '',
  641. 'upload_date': '20150404',
  642. 'uploader_id': 'spbelect',
  643. 'uploader': 'Наблюдатели Петербурга',
  644. },
  645. 'params': {
  646. 'skip_download': 'requires avconv',
  647. },
  648. 'skip': 'This live event has ended.',
  649. },
  650. # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
  651. {
  652. 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
  653. 'info_dict': {
  654. 'id': 'FIl7x6_3R5Y',
  655. 'ext': 'webm',
  656. 'title': 'md5:7b81415841e02ecd4313668cde88737a',
  657. 'description': 'md5:116377fd2963b81ec4ce64b542173306',
  658. 'duration': 220,
  659. 'upload_date': '20150625',
  660. 'uploader_id': 'dorappi2000',
  661. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
  662. 'uploader': 'dorappi2000',
  663. 'formats': 'mincount:31',
  664. },
  665. 'skip': 'not actual anymore',
  666. },
  667. # DASH manifest with segment_list
  668. {
  669. 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
  670. 'md5': '8ce563a1d667b599d21064e982ab9e31',
  671. 'info_dict': {
  672. 'id': 'CsmdDsKjzN8',
  673. 'ext': 'mp4',
  674. 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
  675. 'uploader': 'Airtek',
  676. 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
  677. 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
  678. 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
  679. },
  680. 'params': {
  681. 'youtube_include_dash_manifest': True,
  682. 'format': '135', # bestvideo
  683. },
  684. 'skip': 'This live event has ended.',
  685. },
  686. {
  687. # Multifeed videos (multiple cameras), URL is for Main Camera
  688. 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
  689. 'info_dict': {
  690. 'id': 'jqWvoWXjCVs',
  691. 'title': 'teamPGP: Rocket League Noob Stream',
  692. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  693. },
  694. 'playlist': [{
  695. 'info_dict': {
  696. 'id': 'jqWvoWXjCVs',
  697. 'ext': 'mp4',
  698. 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
  699. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  700. 'duration': 7335,
  701. 'upload_date': '20150721',
  702. 'uploader': 'Beer Games Beer',
  703. 'uploader_id': 'beergamesbeer',
  704. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  705. 'license': 'Standard YouTube License',
  706. },
  707. }, {
  708. 'info_dict': {
  709. 'id': '6h8e8xoXJzg',
  710. 'ext': 'mp4',
  711. 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
  712. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  713. 'duration': 7337,
  714. 'upload_date': '20150721',
  715. 'uploader': 'Beer Games Beer',
  716. 'uploader_id': 'beergamesbeer',
  717. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  718. 'license': 'Standard YouTube License',
  719. },
  720. }, {
  721. 'info_dict': {
  722. 'id': 'PUOgX5z9xZw',
  723. 'ext': 'mp4',
  724. 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
  725. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  726. 'duration': 7337,
  727. 'upload_date': '20150721',
  728. 'uploader': 'Beer Games Beer',
  729. 'uploader_id': 'beergamesbeer',
  730. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  731. 'license': 'Standard YouTube License',
  732. },
  733. }, {
  734. 'info_dict': {
  735. 'id': 'teuwxikvS5k',
  736. 'ext': 'mp4',
  737. 'title': 'teamPGP: Rocket League Noob Stream (zim)',
  738. 'description': 'md5:dc7872fb300e143831327f1bae3af010',
  739. 'duration': 7334,
  740. 'upload_date': '20150721',
  741. 'uploader': 'Beer Games Beer',
  742. 'uploader_id': 'beergamesbeer',
  743. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
  744. 'license': 'Standard YouTube License',
  745. },
  746. }],
  747. 'params': {
  748. 'skip_download': True,
  749. },
  750. 'skip': 'This video is not available.',
  751. },
  752. {
  753. # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
  754. 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
  755. 'info_dict': {
  756. 'id': 'gVfLd0zydlo',
  757. 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
  758. },
  759. 'playlist_count': 2,
  760. 'skip': 'Not multifeed anymore',
  761. },
  762. {
  763. 'url': 'https://vid.plus/FlRa-iH7PGw',
  764. 'only_matching': True,
  765. },
  766. {
  767. 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
  768. 'only_matching': True,
  769. },
  770. {
  771. # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  772. # Also tests cut-off URL expansion in video description (see
  773. # https://github.com/ytdl-org/youtube-dl/issues/1892,
  774. # https://github.com/ytdl-org/youtube-dl/issues/8164)
  775. 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
  776. 'info_dict': {
  777. 'id': 'lsguqyKfVQg',
  778. 'ext': 'mp4',
  779. 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
  780. 'alt_title': 'Dark Walk - Position Music',
  781. 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
  782. 'duration': 133,
  783. 'upload_date': '20151119',
  784. 'uploader_id': 'IronSoulElf',
  785. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
  786. 'uploader': 'IronSoulElf',
  787. 'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
  788. 'track': 'Dark Walk - Position Music',
  789. 'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
  790. 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
  791. },
  792. 'params': {
  793. 'skip_download': True,
  794. },
  795. },
  796. {
  797. # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
  798. 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
  799. 'only_matching': True,
  800. },
  801. {
  802. # Video with yt:stretch=17:0
  803. 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
  804. 'info_dict': {
  805. 'id': 'Q39EVAstoRM',
  806. 'ext': 'mp4',
  807. 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
  808. 'description': 'md5:ee18a25c350637c8faff806845bddee9',
  809. 'upload_date': '20151107',
  810. 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
  811. 'uploader': 'CH GAMER DROID',
  812. },
  813. 'params': {
  814. 'skip_download': True,
  815. },
  816. 'skip': 'This video does not exist.',
  817. },
  818. {
  819. # Video licensed under Creative Commons
  820. 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
  821. 'info_dict': {
  822. 'id': 'M4gD1WSo5mA',
  823. 'ext': 'mp4',
  824. 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
  825. 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
  826. 'duration': 721,
  827. 'upload_date': '20150127',
  828. 'uploader_id': 'BerkmanCenter',
  829. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
  830. 'uploader': 'The Berkman Klein Center for Internet & Society',
  831. 'license': 'Creative Commons Attribution license (reuse allowed)',
  832. },
  833. 'params': {
  834. 'skip_download': True,
  835. },
  836. },
  837. {
  838. # Channel-like uploader_url
  839. 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
  840. 'info_dict': {
  841. 'id': 'eQcmzGIKrzg',
  842. 'ext': 'mp4',
  843. 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
  844. 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
  845. 'duration': 4060,
  846. 'upload_date': '20151119',
  847. 'uploader': 'Bernie Sanders',
  848. 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
  849. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
  850. 'license': 'Creative Commons Attribution license (reuse allowed)',
  851. },
  852. 'params': {
  853. 'skip_download': True,
  854. },
  855. },
  856. {
  857. 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
  858. 'only_matching': True,
  859. },
  860. {
  861. # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
  862. 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
  863. 'only_matching': True,
  864. },
  865. {
  866. # Rental video preview
  867. 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
  868. 'info_dict': {
  869. 'id': 'uGpuVWrhIzE',
  870. 'ext': 'mp4',
  871. 'title': 'Piku - Trailer',
  872. 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
  873. 'upload_date': '20150811',
  874. 'uploader': 'FlixMatrix',
  875. 'uploader_id': 'FlixMatrixKaravan',
  876. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
  877. 'license': 'Standard YouTube License',
  878. },
  879. 'params': {
  880. 'skip_download': True,
  881. },
  882. 'skip': 'This video is not available.',
  883. },
  884. {
  885. # YouTube Red video with episode data
  886. 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
  887. 'info_dict': {
  888. 'id': 'iqKdEhx-dD4',
  889. 'ext': 'mp4',
  890. 'title': 'Isolation - Mind Field (Ep 1)',
  891. 'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
  892. 'duration': 2085,
  893. 'upload_date': '20170118',
  894. 'uploader': 'Vsauce',
  895. 'uploader_id': 'Vsauce',
  896. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
  897. 'series': 'Mind Field',
  898. 'season_number': 1,
  899. 'episode_number': 1,
  900. },
  901. 'params': {
  902. 'skip_download': True,
  903. },
  904. 'expected_warnings': [
  905. 'Skipping DASH manifest',
  906. ],
  907. },
  908. {
  909. # The following content has been identified by the YouTube community
  910. # as inappropriate or offensive to some audiences.
  911. 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
  912. 'info_dict': {
  913. 'id': '6SJNVb0GnPI',
  914. 'ext': 'mp4',
  915. 'title': 'Race Differences in Intelligence',
  916. 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
  917. 'duration': 965,
  918. 'upload_date': '20140124',
  919. 'uploader': 'New Century Foundation',
  920. 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
  921. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
  922. },
  923. 'params': {
  924. 'skip_download': True,
  925. },
  926. },
  927. {
  928. # itag 212
  929. 'url': '1t24XAntNCY',
  930. 'only_matching': True,
  931. },
  932. {
  933. # geo restricted to JP
  934. 'url': 'sJL6WA-aGkQ',
  935. 'only_matching': True,
  936. },
  937. {
  938. 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
  939. 'only_matching': True,
  940. },
  941. {
  942. # DRM protected
  943. 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
  944. 'only_matching': True,
  945. },
  946. {
  947. # Video with unsupported adaptive stream type formats
  948. 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
  949. 'info_dict': {
  950. 'id': 'Z4Vy8R84T1U',
  951. 'ext': 'mp4',
  952. 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
  953. 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
  954. 'duration': 433,
  955. 'upload_date': '20130923',
  956. 'uploader': 'Amelia Putri Harwita',
  957. 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
  958. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
  959. 'formats': 'maxcount:10',
  960. },
  961. 'params': {
  962. 'skip_download': True,
  963. 'youtube_include_dash_manifest': False,
  964. },
  965. 'skip': 'not actual anymore',
  966. },
  967. {
  968. # Youtube Music Auto-generated description
  969. 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
  970. 'info_dict': {
  971. 'id': 'MgNrAu2pzNs',
  972. 'ext': 'mp4',
  973. 'title': 'Voyeur Girl',
  974. 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
  975. 'upload_date': '20190312',
  976. 'uploader': 'Stephen - Topic',
  977. 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
  978. 'artist': 'Stephen',
  979. 'track': 'Voyeur Girl',
  980. 'album': 'it\'s too much love to know my dear',
  981. 'release_date': '20190313',
  982. 'release_year': 2019,
  983. },
  984. 'params': {
  985. 'skip_download': True,
  986. },
  987. },
  988. {
  989. 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
  990. 'only_matching': True,
  991. },
  992. {
  993. # invalid -> valid video id redirection
  994. 'url': 'DJztXj2GPfl',
  995. 'info_dict': {
  996. 'id': 'DJztXj2GPfk',
  997. 'ext': 'mp4',
  998. 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
  999. 'description': 'md5:bf577a41da97918e94fa9798d9228825',
  1000. 'upload_date': '20090125',
  1001. 'uploader': 'Prochorowka',
  1002. 'uploader_id': 'Prochorowka',
  1003. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
  1004. 'artist': 'Panjabi MC',
  1005. 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
  1006. 'album': 'Beware of the Boys (Mundian To Bach Ke)',
  1007. },
  1008. 'params': {
  1009. 'skip_download': True,
  1010. },
  1011. },
  1012. {
  1013. # empty description results in an empty string
  1014. 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
  1015. 'info_dict': {
  1016. 'id': 'x41yOUIvK2k',
  1017. 'ext': 'mp4',
  1018. 'title': 'IMG 3456',
  1019. 'description': '',
  1020. 'upload_date': '20170613',
  1021. 'uploader_id': 'ElevageOrVert',
  1022. 'uploader': 'ElevageOrVert',
  1023. },
  1024. 'params': {
  1025. 'skip_download': True,
  1026. },
  1027. },
  1028. {
  1029. # with '};' inside yt initial data (see https://github.com/ytdl-org/youtube-dl/issues/27093)
  1030. 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
  1031. 'info_dict': {
  1032. 'id': 'CHqg6qOn4no',
  1033. 'ext': 'mp4',
  1034. 'title': 'Part 77 Sort a list of simple types in c#',
  1035. 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
  1036. 'upload_date': '20130831',
  1037. 'uploader_id': 'kudvenkat',
  1038. 'uploader': 'kudvenkat',
  1039. },
  1040. 'params': {
  1041. 'skip_download': True,
  1042. },
  1043. },
  1044. ]
    def __init__(self, *args, **kwargs):
        """Initialise the extractor and its per-instance signature cache."""
        super(YoutubeIE, self).__init__(*args, **kwargs)
        # Maps (player_url, signature_cache_id) -> decipher function so each
        # player/signature layout is parsed at most once per instance.
        self._player_cache = {}
  1048. def report_video_info_webpage_download(self, video_id):
  1049. """Report attempt to download video info webpage."""
  1050. self.to_screen('%s: Downloading video info webpage' % video_id)
  1051. def report_information_extraction(self, video_id):
  1052. """Report attempt to extract video information."""
  1053. self.to_screen('%s: Extracting video information' % video_id)
  1054. def report_unavailable_format(self, video_id, format):
  1055. """Report extracted video URL."""
  1056. self.to_screen('%s: Format %s not available' % (video_id, format))
  1057. def report_rtmp_download(self):
  1058. """Indicate the download will use the RTMP protocol."""
  1059. self.to_screen('RTMP download detected')
  1060. def _signature_cache_id(self, example_sig):
  1061. """ Return a string representation of a signature """
  1062. return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
  1063. @classmethod
  1064. def _extract_player_info(cls, player_url):
  1065. for player_re in cls._PLAYER_INFO_RE:
  1066. id_m = re.search(player_re, player_url)
  1067. if id_m:
  1068. break
  1069. else:
  1070. raise ExtractorError('Cannot identify player %r' % player_url)
  1071. return id_m.group('ext'), id_m.group('id')
    def _extract_signature_function(self, video_id, player_url, example_sig):
        """Build (and memoise on disk) the signature decipher function.

        example_sig is used only for its '.'-separated length layout, which
        keys the cache together with the player type and id.
        """
        player_type, player_id = self._extract_player_info(player_url)
        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        # func_id doubles as a cache file name component, so it must not
        # contain path separators.
        assert os.path.basename(func_id) == func_id
        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            # A cached spec is a permutation: output char i is taken from
            # input position cache_spec[i].
            return lambda s: ''.join(s[i] for i in cache_spec)
        download_note = (
            'Downloading player %s' % player_url
            if self._downloader.params.get('verbose') else
            'Downloading %s player %s' % (player_type, player_id)
        )
        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type
        # Derive the permutation by running the extracted function on a probe
        # string of unique characters, then persist it for future runs.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = res(test_string)
        cache_spec = [ord(c) for c in cache_res]
        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the decipher function func.

        Used with the youtube_print_sig_code option so new signature
        algorithms can be embedded as static code.
        """
        def gen_sig_code(idxs):
            # Emit compact slice expressions for runs of consecutive
            # (step +1 or -1) indices, and single lookups otherwise.
            def _genslice(start, end, step):
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)
            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    # Inside a run: keep going while the step matches,
                    # otherwise close the slice.
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # Flush whatever the loop left pending (single index or run).
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)
        # Recover the permutation by probing func with unique characters.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
    def _parse_sig_js(self, jscode):
        """Locate the signature-scrambling function in player JS code.

        Returns a callable mapping an encrypted signature string to its
        deciphered form, backed by JSInterpreter. Patterns are tried in
        order, newest player layouts first.
        """
        funcname = self._search_regex(
            (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
             # Obsolete patterns
             r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
             r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
            jscode, 'Initial JS player signature function name', group='sig')
        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        # JSInterpreter functions take their argument list as a sequence.
        return lambda s: initial_function([s])
  1161. def _parse_sig_swf(self, file_contents):
  1162. swfi = SWFInterpreter(file_contents)
  1163. TARGET_CLASSNAME = 'SignatureDecipher'
  1164. searched_class = swfi.extract_class(TARGET_CLASSNAME)
  1165. initial_function = swfi.extract_function(searched_class, 'decipher')
  1166. return lambda s: initial_function([s])
    def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
        """Turn the encrypted s field into a working signature"""
        # NOTE: age_gate is accepted for interface compatibility; it is not
        # used in this implementation.
        if player_url is None:
            raise ExtractorError('Cannot decrypt signature without player_url')
        # Normalise protocol-relative and site-relative player URLs.
        if player_url.startswith('//'):
            player_url = 'https:' + player_url
        elif not re.match(r'https?://', player_url):
            player_url = compat_urlparse.urljoin(
                'https://www.youtube.com', player_url)
        try:
            # Cache per player URL and signature length layout: different
            # layouts may be deciphered by different routines.
            player_id = (player_url, self._signature_cache_id(s))
            if player_id not in self._player_cache:
                func = self._extract_signature_function(
                    video_id, player_url, s
                )
                self._player_cache[player_id] = func
            func = self._player_cache[player_id]
            if self._downloader.params.get('youtube_print_sig_code'):
                self._print_sig_code(func, s)
            return func(s)
        except Exception as e:
            # Wrap any failure (download, parsing, interpretation) with the
            # traceback so bug reports carry the full context.
            tb = traceback.format_exc()
            raise ExtractorError(
                'Signature extraction failed: ' + tb, cause=e)
    def _get_subtitles(self, video_id, webpage, has_live_chat_replay):
        """Return a {lang: [format dicts]} map of manually created subtitles.

        webpage is unused here; has_live_chat_replay adds a synthetic
        'live_chat' entry consumed by the youtube_live_chat_replay protocol.
        Returns {} (with a warning) when no subtitles are available.
        """
        try:
            subs_doc = self._download_xml(
                'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
            return {}
        sub_lang_list = {}
        for track in subs_doc.findall('track'):
            lang = track.attrib['lang_code']
            # Keep only the first track per language.
            if lang in sub_lang_list:
                continue
            sub_formats = []
            for ext in self._SUBTITLE_FORMATS:
                params = compat_urllib_parse_urlencode({
                    'lang': lang,
                    'v': video_id,
                    'fmt': ext,
                    'name': track.attrib['name'].encode('utf-8'),
                })
                sub_formats.append({
                    'url': 'https://www.youtube.com/api/timedtext?' + params,
                    'ext': ext,
                })
            sub_lang_list[lang] = sub_formats
        if has_live_chat_replay:
            sub_lang_list['live_chat'] = [
                {
                    'video_id': video_id,
                    'ext': 'json',
                    'protocol': 'youtube_live_chat_replay',
                },
            ]
        if not sub_lang_list:
            self._downloader.report_warning('video doesn\'t have subtitles')
            return {}
        return sub_lang_list
  1229. def _get_ytplayer_config(self, video_id, webpage):
  1230. patterns = (
  1231. # User data may contain arbitrary character sequences that may affect
  1232. # JSON extraction with regex, e.g. when '};' is contained the second
  1233. # regex won't capture the whole JSON. Yet working around by trying more
  1234. # concrete regex first keeping in mind proper quoted string handling
  1235. # to be implemented in future that will replace this workaround (see
  1236. # https://github.com/ytdl-org/youtube-dl/issues/7468,
  1237. # https://github.com/ytdl-org/youtube-dl/pull/7599)
  1238. r';ytplayer\.config\s*=\s*({.+?});ytplayer',
  1239. r';ytplayer\.config\s*=\s*({.+?});',
  1240. )
  1241. config = self._search_regex(
  1242. patterns, webpage, 'ytplayer.config', default=None)
  1243. if config:
  1244. return self._parse_json(
  1245. uppercase_escape(config), video_id, fatal=False)
  1246. def _get_music_metadata_from_yt_initial(self, yt_initial):
  1247. music_metadata = []
  1248. key_map = {
  1249. 'Album': 'album',
  1250. 'Artist': 'artist',
  1251. 'Song': 'track'
  1252. }
  1253. contents = try_get(yt_initial, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'])
  1254. if type(contents) is list:
  1255. for content in contents:
  1256. music_track = {}
  1257. if type(content) is not dict:
  1258. continue
  1259. videoSecondaryInfoRenderer = try_get(content, lambda x: x['videoSecondaryInfoRenderer'])
  1260. if type(videoSecondaryInfoRenderer) is not dict:
  1261. continue
  1262. rows = try_get(videoSecondaryInfoRenderer, lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'])
  1263. if type(rows) is not list:
  1264. continue
  1265. for row in rows:
  1266. metadataRowRenderer = try_get(row, lambda x: x['metadataRowRenderer'])
  1267. if type(metadataRowRenderer) is not dict:
  1268. continue
  1269. key = try_get(metadataRowRenderer, lambda x: x['title']['simpleText'])
  1270. value = try_get(metadataRowRenderer, lambda x: x['contents'][0]['simpleText']) or \
  1271. try_get(metadataRowRenderer, lambda x: x['contents'][0]['runs'][0]['text'])
  1272. if type(key) is not str or type(value) is not str:
  1273. continue
  1274. if key in key_map:
  1275. if key_map[key] in music_track:
  1276. # we've started on a new track
  1277. music_metadata.append(music_track)
  1278. music_track = {}
  1279. music_track[key_map[key]] = value
  1280. if len(music_track.keys()):
  1281. music_metadata.append(music_track)
  1282. return music_metadata
    def _get_automatic_captions(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
        argument to speed up the process.

        Returns a {lang: [format dicts]} map of automatic captions, or {}
        (with a warning) when none can be found. Three historical data
        layouts are supported, tried from oldest to newest.
        """
        self.to_screen('%s: Looking for automatic captions' % video_id)
        player_config = self._get_ytplayer_config(video_id, webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if not player_config:
            self._downloader.report_warning(err_msg)
            return {}
        try:
            args = player_config['args']
            caption_url = args.get('ttsurl')
            if caption_url:
                # Legacy ttsurl flow: query the timedtext service for the
                # source track and the available translation targets.
                timestamp = args['timestamp']
                # We get the available subtitles
                list_params = compat_urllib_parse_urlencode({
                    'type': 'list',
                    'tlangs': 1,
                    'asrs': 1,
                })
                list_url = caption_url + '&' + list_params
                caption_list = self._download_xml(list_url, video_id)
                original_lang_node = caption_list.find('track')
                if original_lang_node is None:
                    self._downloader.report_warning('Video doesn\'t have automatic captions')
                    return {}
                original_lang = original_lang_node.attrib['lang_code']
                caption_kind = original_lang_node.attrib.get('kind', '')
                sub_lang_list = {}
                for lang_node in caption_list.findall('target'):
                    sub_lang = lang_node.attrib['lang_code']
                    sub_formats = []
                    for ext in self._SUBTITLE_FORMATS:
                        params = compat_urllib_parse_urlencode({
                            'lang': original_lang,
                            'tlang': sub_lang,
                            'fmt': ext,
                            'ts': timestamp,
                            'kind': caption_kind,
                        })
                        sub_formats.append({
                            'url': caption_url + '&' + params,
                            'ext': ext,
                        })
                    sub_lang_list[sub_lang] = sub_formats
                return sub_lang_list

            def make_captions(sub_url, sub_langs):
                # Build {lang: [format dicts]} by rewriting the base caption
                # URL's query string per target language and format.
                parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
                caption_qs = compat_parse_qs(parsed_sub_url.query)
                captions = {}
                for sub_lang in sub_langs:
                    sub_formats = []
                    for ext in self._SUBTITLE_FORMATS:
                        caption_qs.update({
                            'tlang': [sub_lang],
                            'fmt': [ext],
                        })
                        sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
                            query=compat_urllib_parse_urlencode(caption_qs, True)))
                        sub_formats.append({
                            'url': sub_url,
                            'ext': ext,
                        })
                    captions[sub_lang] = sub_formats
                return captions

            # New captions format as of 22.06.2017
            player_response = args.get('player_response')
            if player_response and isinstance(player_response, compat_str):
                player_response = self._parse_json(
                    player_response, video_id, fatal=False)
                if player_response:
                    renderer = player_response['captions']['playerCaptionsTracklistRenderer']
                    base_url = renderer['captionTracks'][0]['baseUrl']
                    sub_lang_list = []
                    for lang in renderer['translationLanguages']:
                        lang_code = lang.get('languageCode')
                        if lang_code:
                            sub_lang_list.append(lang_code)
                    return make_captions(base_url, sub_lang_list)
            # Some videos don't provide ttsurl but rather caption_tracks and
            # caption_translation_languages (e.g. 20LmZk1hakA)
            # Does not used anymore as of 22.06.2017
            caption_tracks = args['caption_tracks']
            caption_translation_languages = args['caption_translation_languages']
            caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
            sub_lang_list = []
            for lang in caption_translation_languages.split(','):
                lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
                sub_lang = lang_qs.get('lc', [None])[0]
                if sub_lang:
                    sub_lang_list.append(sub_lang)
            return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raise by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, IndexError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
  1380. def _mark_watched(self, video_id, video_info, player_response):
  1381. playback_url = url_or_none(try_get(
  1382. player_response,
  1383. lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
  1384. video_info, lambda x: x['videostats_playback_base_url'][0]))
  1385. if not playback_url:
  1386. return
  1387. parsed_playback_url = compat_urlparse.urlparse(playback_url)
  1388. qs = compat_urlparse.parse_qs(parsed_playback_url.query)
  1389. # cpn generation algorithm is reverse engineered from base.js.
  1390. # In fact it works even with dummy cpn.
  1391. CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
  1392. cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
  1393. qs.update({
  1394. 'ver': ['2'],
  1395. 'cpn': [cpn],
  1396. })
  1397. playback_url = compat_urlparse.urlunparse(
  1398. parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
  1399. self._download_webpage(
  1400. playback_url, video_id, 'Marking watched',
  1401. 'Unable to mark watched', fatal=False)
  1402. @staticmethod
  1403. def _extract_urls(webpage):
  1404. # Embedded YouTube player
  1405. entries = [
  1406. unescapeHTML(mobj.group('url'))
  1407. for mobj in re.finditer(r'''(?x)
  1408. (?:
  1409. <iframe[^>]+?src=|
  1410. data-video-url=|
  1411. <embed[^>]+?src=|
  1412. embedSWF\(?:\s*|
  1413. <object[^>]+data=|
  1414. new\s+SWFObject\(
  1415. )
  1416. (["\'])
  1417. (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
  1418. (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
  1419. \1''', webpage)]
  1420. # lazyYT YouTube embed
  1421. entries.extend(list(map(
  1422. unescapeHTML,
  1423. re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
  1424. # Wordpress "YouTube Video Importer" plugin
  1425. matches = re.findall(r'''(?x)<div[^>]+
  1426. class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
  1427. data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
  1428. entries.extend(m[-1] for m in matches)
  1429. return entries
  1430. @staticmethod
  1431. def _extract_url(webpage):
  1432. urls = YoutubeIE._extract_urls(webpage)
  1433. return urls[0] if urls else None
  1434. @classmethod
  1435. def extract_id(cls, url):
  1436. mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
  1437. if mobj is None:
  1438. raise ExtractorError('Invalid URL: %s' % url)
  1439. video_id = mobj.group(2)
  1440. return video_id
    def _extract_chapters_from_json(self, webpage, video_id, duration):
        """Extract chapter markers from ytInitialData's player bar.

        Returns a list of {start_time, end_time, title} dicts, or None when
        the page carries no usable chapter data.
        """
        if not webpage:
            return
        data = self._extract_yt_initial_data(video_id, webpage)
        if not data or not isinstance(data, dict):
            return
        chapters_list = try_get(
            data,
            lambda x: x['playerOverlays']
                       ['playerOverlayRenderer']
                       ['decoratedPlayerBarRenderer']
                       ['decoratedPlayerBarRenderer']
                       ['playerBar']
                       ['chapteredPlayerBarRenderer']
                       ['chapters'],
            list)
        if not chapters_list:
            return

        def chapter_time(chapter):
            # Chapter starts are given in milliseconds; convert to seconds.
            return float_or_none(
                try_get(
                    chapter,
                    lambda x: x['chapterRenderer']['timeRangeStartMillis'],
                    int),
                scale=1000)
        chapters = []
        for next_num, chapter in enumerate(chapters_list, start=1):
            start_time = chapter_time(chapter)
            if start_time is None:
                continue
            # A chapter ends where the next one starts; the final chapter
            # ends at the video duration.
            end_time = (chapter_time(chapters_list[next_num])
                        if next_num < len(chapters_list) else duration)
            if end_time is None:
                continue
            title = try_get(
                chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
                compat_str)
            chapters.append({
                'start_time': start_time,
                'end_time': end_time,
                'title': title,
            })
        return chapters
  1484. @staticmethod
  1485. def _extract_chapters_from_description(description, duration):
  1486. if not description:
  1487. return None
  1488. chapter_lines = re.findall(
  1489. r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
  1490. description)
  1491. if not chapter_lines:
  1492. return None
  1493. chapters = []
  1494. for next_num, (chapter_line, time_point) in enumerate(
  1495. chapter_lines, start=1):
  1496. start_time = parse_duration(time_point)
  1497. if start_time is None:
  1498. continue
  1499. if start_time > duration:
  1500. break
  1501. end_time = (duration if next_num == len(chapter_lines)
  1502. else parse_duration(chapter_lines[next_num][1]))
  1503. if end_time is None:
  1504. continue
  1505. if end_time > duration:
  1506. end_time = duration
  1507. if start_time > end_time:
  1508. break
  1509. chapter_title = re.sub(
  1510. r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
  1511. chapter_title = re.sub(r'\s+', ' ', chapter_title)
  1512. chapters.append({
  1513. 'start_time': start_time,
  1514. 'end_time': end_time,
  1515. 'title': chapter_title,
  1516. })
  1517. return chapters
  1518. def _extract_chapters(self, webpage, description, video_id, duration):
  1519. return (self._extract_chapters_from_json(webpage, video_id, duration)
  1520. or self._extract_chapters_from_description(description, duration))
  1521. def _real_extract(self, url):
  1522. url, smuggled_data = unsmuggle_url(url, {})
  1523. proto = (
  1524. 'http' if self._downloader.params.get('prefer_insecure', False)
  1525. else 'https')
  1526. start_time = None
  1527. end_time = None
  1528. parsed_url = compat_urllib_parse_urlparse(url)
  1529. for component in [parsed_url.fragment, parsed_url.query]:
  1530. query = compat_parse_qs(component)
  1531. if start_time is None and 't' in query:
  1532. start_time = parse_duration(query['t'][0])
  1533. if start_time is None and 'start' in query:
  1534. start_time = parse_duration(query['start'][0])
  1535. if end_time is None and 'end' in query:
  1536. end_time = parse_duration(query['end'][0])
  1537. # Extract original video URL from URL with redirection, like age verification, using next_url parameter
  1538. mobj = re.search(self._NEXT_URL_RE, url)
  1539. if mobj:
  1540. url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
  1541. video_id = self.extract_id(url)
  1542. # Get video webpage
  1543. url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
  1544. video_webpage, urlh = self._download_webpage_handle(url, video_id)
  1545. qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
  1546. video_id = qs.get('v', [None])[0] or video_id
  1547. # Attempt to extract SWF player URL
  1548. mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
  1549. if mobj is not None:
  1550. player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
  1551. else:
  1552. player_url = None
  1553. dash_mpds = []
def add_dash_mpd(video_info):
    # Record the legacy 'dashmpd' manifest URL from a get_video_info
    # style dict (values are lists), de-duplicating into the enclosing
    # dash_mpds accumulator.
    dash_mpd = video_info.get('dashmpd')
    if dash_mpd and dash_mpd[0] not in dash_mpds:
        dash_mpds.append(dash_mpd[0])
def add_dash_mpd_pr(pl_response):
    # Record the DASH manifest URL advertised inside a player_response
    # dict, if it is a valid URL and not already collected.
    dash_mpd = url_or_none(try_get(
        pl_response, lambda x: x['streamingData']['dashManifestUrl'],
        compat_str))
    if dash_mpd and dash_mpd not in dash_mpds:
        dash_mpds.append(dash_mpd)
  1564. is_live = None
  1565. view_count = None
def extract_view_count(v_info):
    # 'view_count' arrives as a list of strings (compat_parse_qs shape);
    # returns an int or None.
    return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
def extract_player_response(player_response, video_id):
    # Parse a player_response JSON string into a dict; as a side effect
    # harvests its DASH manifest URL. Returns None on missing or
    # unparsable input.
    pl_response = str_or_none(player_response)
    if not pl_response:
        return
    pl_response = self._parse_json(pl_response, video_id, fatal=False)
    if isinstance(pl_response, dict):
        add_dash_mpd_pr(pl_response)
        return pl_response
def extract_embedded_config(embed_webpage, video_id):
    # Grab the raw setConfig({...}) JSON blob from the embed page; used
    # below to probe the age-gate 'playableInEmbed' flag. Returns the
    # raw string, or None implicitly when absent.
    embedded_config = self._search_regex(
        r'setConfig\(({.*})\);',
        embed_webpage, 'ytInitialData', default=None)
    if embedded_config:
        return embedded_config
  1582. player_response = {}
  1583. # Get video info
  1584. video_info = {}
  1585. embed_webpage = None
  1586. if (self._og_search_property('restrictions:age', video_webpage, default=None) == '18+'
  1587. or re.search(r'player-age-gate-content">', video_webpage) is not None):
  1588. cookie_keys = self._get_cookies('https://www.youtube.com').keys()
  1589. age_gate = True
  1590. # We simulate the access to the video from www.youtube.com/v/{video_id}
  1591. # this can be viewed without login into Youtube
  1592. url = proto + '://www.youtube.com/embed/%s' % video_id
  1593. embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
  1594. ext = extract_embedded_config(embed_webpage, video_id)
  1595. # playabilityStatus = re.search(r'{\\\"status\\\":\\\"(?P<playabilityStatus>[^\"]+)\\\"', ext)
  1596. playable_in_embed = re.search(r'{\\\"playableInEmbed\\\":(?P<playableinEmbed>[^\,]+)', ext)
  1597. if not playable_in_embed:
  1598. self.to_screen('Could not determine whether playabale in embed for video %s' % video_id)
  1599. playable_in_embed = ''
  1600. else:
  1601. playable_in_embed = playable_in_embed.group('playableinEmbed')
  1602. # check if video is only playable on youtube in other words not playable in embed - if so it requires auth (cookies)
  1603. # if re.search(r'player-unavailable">', embed_webpage) is not None:
  1604. if playable_in_embed == 'false':
  1605. '''
  1606. # TODO apply this patch when Support for Python 2.6(!) and above drops
  1607. if ({'VISITOR_INFO1_LIVE', 'HSID', 'SSID', 'SID'} <= cookie_keys
  1608. or {'VISITOR_INFO1_LIVE', '__Secure-3PSID', 'LOGIN_INFO'} <= cookie_keys):
  1609. '''
  1610. if (set(('VISITOR_INFO1_LIVE', 'HSID', 'SSID', 'SID')) <= set(cookie_keys)
  1611. or set(('VISITOR_INFO1_LIVE', '__Secure-3PSID', 'LOGIN_INFO')) <= set(cookie_keys)):
  1612. age_gate = False
  1613. # Try looking directly into the video webpage
  1614. ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
  1615. if ytplayer_config:
  1616. args = ytplayer_config.get("args")
  1617. if args is not None:
  1618. if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
  1619. # Convert to the same format returned by compat_parse_qs
  1620. video_info = dict((k, [v]) for k, v in args.items())
  1621. add_dash_mpd(video_info)
  1622. # Rental video is not rented but preview is available (e.g.
  1623. # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
  1624. # https://github.com/ytdl-org/youtube-dl/issues/10532)
  1625. if not video_info and args.get('ypc_vid'):
  1626. return self.url_result(
  1627. args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
  1628. if args.get('livestream') == '1' or args.get('live_playback') == 1:
  1629. is_live = True
  1630. if not player_response:
  1631. player_response = extract_player_response(args.get('player_response'), video_id)
  1632. elif not player_response:
  1633. player_response = ytplayer_config
  1634. if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
  1635. add_dash_mpd_pr(player_response)
  1636. else:
  1637. raise ExtractorError('Video is age restricted and only playable on Youtube. Requires cookies!', expected=True)
  1638. else:
  1639. data = compat_urllib_parse_urlencode({
  1640. 'video_id': video_id,
  1641. 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
  1642. 'sts': self._search_regex(
  1643. r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
  1644. })
  1645. video_info_url = proto + '://www.youtube.com/get_video_info?' + data
  1646. try:
  1647. video_info_webpage = self._download_webpage(
  1648. video_info_url, video_id,
  1649. note='Refetching age-gated info webpage',
  1650. errnote='unable to download video info webpage')
  1651. except ExtractorError:
  1652. video_info_webpage = None
  1653. if video_info_webpage:
  1654. video_info = compat_parse_qs(video_info_webpage)
  1655. pl_response = video_info.get('player_response', [None])[0]
  1656. player_response = extract_player_response(pl_response, video_id)
  1657. add_dash_mpd(video_info)
  1658. view_count = extract_view_count(video_info)
  1659. else:
  1660. age_gate = False
  1661. # Try looking directly into the video webpage
  1662. ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
  1663. if ytplayer_config:
  1664. args = ytplayer_config.get('args', {})
  1665. if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
  1666. # Convert to the same format returned by compat_parse_qs
  1667. video_info = dict((k, [v]) for k, v in args.items())
  1668. add_dash_mpd(video_info)
  1669. # Rental video is not rented but preview is available (e.g.
  1670. # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
  1671. # https://github.com/ytdl-org/youtube-dl/issues/10532)
  1672. if not video_info and args.get('ypc_vid'):
  1673. return self.url_result(
  1674. args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
  1675. if args.get('livestream') == '1' or args.get('live_playback') == 1:
  1676. is_live = True
  1677. if not player_response:
  1678. player_response = extract_player_response(args.get('player_response'), video_id)
  1679. if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
  1680. add_dash_mpd_pr(player_response)
  1681. if not video_info and not player_response:
  1682. player_response = extract_player_response(
  1683. self._search_regex(
  1684. r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;', video_webpage,
  1685. 'initial player response', default='{}'),
  1686. video_id)
def extract_unavailable_message():
    # Scrape the human-readable "video unavailable" message and
    # submessage from the watch page markup; returns the joined text or
    # None implicitly when neither element is present.
    messages = []
    for tag, kind in (('h1', 'message'), ('div', 'submessage')):
        msg = self._html_search_regex(
            r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
            video_webpage, 'unavailable %s' % kind, default=None)
        if msg:
            messages.append(msg)
    if messages:
        return '\n'.join(messages)
  1697. if not video_info and not player_response:
  1698. unavailable_message = extract_unavailable_message()
  1699. if not unavailable_message:
  1700. unavailable_message = 'Unable to extract video data'
  1701. raise ExtractorError(
  1702. 'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
  1703. if not isinstance(video_info, dict):
  1704. video_info = {}
  1705. video_details = try_get(
  1706. player_response, lambda x: x['videoDetails'], dict) or {}
  1707. microformat = try_get(
  1708. player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
  1709. video_title = video_info.get('title', [None])[0] or video_details.get('title')
  1710. if not video_title:
  1711. self._downloader.report_warning('Unable to extract video title')
  1712. video_title = '_'
  1713. description_original = video_description = get_element_by_id("eow-description", video_webpage)
  1714. if video_description:
def replace_url(m):
    # re.sub callback: resolve YouTube /redirect links found in the
    # description back to their real target (the 'q' query parameter);
    # any other link is returned resolved against the page URL.
    redir_url = compat_urlparse.urljoin(url, m.group(1))
    parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
    if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
        qs = compat_parse_qs(parsed_redir_url.query)
        q = qs.get('q')
        if q and q[0]:
            return q[0]
    return redir_url
  1724. description_original = video_description = re.sub(r'''(?x)
  1725. <a\s+
  1726. (?:[a-zA-Z-]+="[^"]*"\s+)*?
  1727. (?:title|href)="([^"]+)"\s+
  1728. (?:[a-zA-Z-]+="[^"]*"\s+)*?
  1729. class="[^"]*"[^>]*>
  1730. [^<]+\.{3}\s*
  1731. </a>
  1732. ''', replace_url, video_description)
  1733. video_description = clean_html(video_description)
  1734. else:
  1735. video_description = video_details.get('shortDescription')
  1736. if video_description is None:
  1737. video_description = self._html_search_meta('description', video_webpage)
  1738. if not smuggled_data.get('force_singlefeed', False):
  1739. if not self._downloader.params.get('noplaylist'):
  1740. multifeed_metadata_list = try_get(
  1741. player_response,
  1742. lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
  1743. compat_str) or try_get(
  1744. video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
  1745. if multifeed_metadata_list:
  1746. entries = []
  1747. feed_ids = []
  1748. for feed in multifeed_metadata_list.split(','):
  1749. # Unquote should take place before split on comma (,) since textual
  1750. # fields may contain comma as well (see
  1751. # https://github.com/ytdl-org/youtube-dl/issues/8536)
  1752. feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
    # Fetch a single-valued field from the parsed multifeed metadata
    # (compat_parse_qs shape: values are lists).
    return try_get(feed_data, lambda x: x[name][0], compat_str)
  1755. feed_id = feed_entry('id')
  1756. if not feed_id:
  1757. continue
  1758. feed_title = feed_entry('title')
  1759. title = video_title
  1760. if feed_title:
  1761. title += ' (%s)' % feed_title
  1762. entries.append({
  1763. '_type': 'url_transparent',
  1764. 'ie_key': 'Youtube',
  1765. 'url': smuggle_url(
  1766. '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
  1767. {'force_singlefeed': True}),
  1768. 'title': title,
  1769. })
  1770. feed_ids.append(feed_id)
  1771. self.to_screen(
  1772. 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
  1773. % (', '.join(feed_ids), video_id))
  1774. return self.playlist_result(entries, video_id, video_title, video_description)
  1775. else:
  1776. self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
  1777. if view_count is None:
  1778. view_count = extract_view_count(video_info)
  1779. if view_count is None and video_details:
  1780. view_count = int_or_none(video_details.get('viewCount'))
  1781. if view_count is None and microformat:
  1782. view_count = int_or_none(microformat.get('viewCount'))
  1783. if is_live is None:
  1784. is_live = bool_or_none(video_details.get('isLive'))
  1785. has_live_chat_replay = False
  1786. if not is_live:
  1787. yt_initial_data = self._get_yt_initial_data(video_id, video_webpage)
  1788. try:
  1789. yt_initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
  1790. has_live_chat_replay = True
  1791. except (KeyError, IndexError, TypeError):
  1792. pass
  1793. # Check for "rental" videos
  1794. if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
  1795. raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
    # Media URLs embed the content length as 'clen=<bytes>' or
    # '/clen/<bytes>'; returns an int or None.
    return int_or_none(self._search_regex(
        r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
  1799. streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
  1800. streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
  1801. if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
  1802. self.report_rtmp_download()
  1803. formats = [{
  1804. 'format_id': '_rtmp',
  1805. 'protocol': 'rtmp',
  1806. 'url': video_info['conn'][0],
  1807. 'player_url': player_url,
  1808. }]
  1809. elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
  1810. encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
  1811. if 'rtmpe%3Dyes' in encoded_url_map:
  1812. raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
  1813. formats = []
  1814. formats_spec = {}
  1815. fmt_list = video_info.get('fmt_list', [''])[0]
  1816. if fmt_list:
  1817. for fmt in fmt_list.split(','):
  1818. spec = fmt.split('/')
  1819. if len(spec) > 1:
  1820. width_height = spec[1].split('x')
  1821. if len(width_height) == 2:
  1822. formats_spec[spec[0]] = {
  1823. 'resolution': spec[1],
  1824. 'width': int_or_none(width_height[0]),
  1825. 'height': int_or_none(width_height[1]),
  1826. }
  1827. for fmt in streaming_formats:
  1828. itag = str_or_none(fmt.get('itag'))
  1829. if not itag:
  1830. continue
  1831. quality = fmt.get('quality')
  1832. quality_label = fmt.get('qualityLabel') or quality
  1833. formats_spec[itag] = {
  1834. 'asr': int_or_none(fmt.get('audioSampleRate')),
  1835. 'filesize': int_or_none(fmt.get('contentLength')),
  1836. 'format_note': quality_label,
  1837. 'fps': int_or_none(fmt.get('fps')),
  1838. 'height': int_or_none(fmt.get('height')),
  1839. # bitrate for itag 43 is always 2147483647
  1840. 'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
  1841. 'width': int_or_none(fmt.get('width')),
  1842. }
  1843. for fmt in streaming_formats:
  1844. if fmt.get('drmFamilies') or fmt.get('drm_families'):
  1845. continue
  1846. url = url_or_none(fmt.get('url'))
  1847. if not url:
  1848. cipher = fmt.get('cipher') or fmt.get('signatureCipher')
  1849. if not cipher:
  1850. continue
  1851. url_data = compat_parse_qs(cipher)
  1852. url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
  1853. if not url:
  1854. continue
  1855. else:
  1856. cipher = None
  1857. url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
  1858. stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
  1859. # Unsupported FORMAT_STREAM_TYPE_OTF
  1860. if stream_type == 3:
  1861. continue
  1862. format_id = fmt.get('itag') or url_data['itag'][0]
  1863. if not format_id:
  1864. continue
  1865. format_id = compat_str(format_id)
  1866. if cipher:
  1867. if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
  1868. ASSETS_RE = (
  1869. r'<script[^>]+\bsrc=("[^"]+")[^>]+\bname=["\']player_ias/base',
  1870. r'"jsUrl"\s*:\s*("[^"]+")',
  1871. r'"assets":.+?"js":\s*("[^"]+")')
  1872. jsplayer_url_json = self._search_regex(
  1873. ASSETS_RE,
  1874. embed_webpage if age_gate else video_webpage,
  1875. 'JS player URL (1)', default=None)
  1876. if not jsplayer_url_json and not age_gate:
  1877. # We need the embed website after all
  1878. if embed_webpage is None:
  1879. embed_url = proto + '://www.youtube.com/embed/%s' % video_id
  1880. embed_webpage = self._download_webpage(
  1881. embed_url, video_id, 'Downloading embed webpage')
  1882. jsplayer_url_json = self._search_regex(
  1883. ASSETS_RE, embed_webpage, 'JS player URL')
  1884. player_url = json.loads(jsplayer_url_json)
  1885. if player_url is None:
  1886. player_url_json = self._search_regex(
  1887. r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
  1888. video_webpage, 'age gate player URL')
  1889. player_url = json.loads(player_url_json)
  1890. if 'sig' in url_data:
  1891. url += '&signature=' + url_data['sig'][0]
  1892. elif 's' in url_data:
  1893. encrypted_sig = url_data['s'][0]
  1894. if self._downloader.params.get('verbose'):
  1895. if player_url is None:
  1896. player_desc = 'unknown'
  1897. else:
  1898. player_type, player_version = self._extract_player_info(player_url)
  1899. player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
  1900. parts_sizes = self._signature_cache_id(encrypted_sig)
  1901. self.to_screen('{%s} signature length %s, %s' %
  1902. (format_id, parts_sizes, player_desc))
  1903. signature = self._decrypt_signature(
  1904. encrypted_sig, video_id, player_url, age_gate)
  1905. sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
  1906. url += '&%s=%s' % (sp, signature)
  1907. if 'ratebypass' not in url:
  1908. url += '&ratebypass=yes'
  1909. dct = {
  1910. 'format_id': format_id,
  1911. 'url': url,
  1912. 'player_url': player_url,
  1913. }
  1914. if format_id in self._formats:
  1915. dct.update(self._formats[format_id])
  1916. if format_id in formats_spec:
  1917. dct.update(formats_spec[format_id])
  1918. # Some itags are not included in DASH manifest thus corresponding formats will
  1919. # lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
  1920. # Trying to extract metadata from url_encoded_fmt_stream_map entry.
  1921. mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
  1922. width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
  1923. if width is None:
  1924. width = int_or_none(fmt.get('width'))
  1925. if height is None:
  1926. height = int_or_none(fmt.get('height'))
  1927. filesize = int_or_none(url_data.get(
  1928. 'clen', [None])[0]) or _extract_filesize(url)
  1929. quality = url_data.get('quality', [None])[0] or fmt.get('quality')
  1930. quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
  1931. tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
  1932. or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
  1933. fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
  1934. more_fields = {
  1935. 'filesize': filesize,
  1936. 'tbr': tbr,
  1937. 'width': width,
  1938. 'height': height,
  1939. 'fps': fps,
  1940. 'format_note': quality_label or quality,
  1941. }
  1942. for key, value in more_fields.items():
  1943. if value:
  1944. dct[key] = value
  1945. type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
  1946. if type_:
  1947. type_split = type_.split(';')
  1948. kind_ext = type_split[0].split('/')
  1949. if len(kind_ext) == 2:
  1950. kind, _ = kind_ext
  1951. dct['ext'] = mimetype2ext(type_split[0])
  1952. if kind in ('audio', 'video'):
  1953. codecs = None
  1954. for mobj in re.finditer(
  1955. r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
  1956. if mobj.group('key') == 'codecs':
  1957. codecs = mobj.group('val')
  1958. break
  1959. if codecs:
  1960. dct.update(parse_codecs(codecs))
  1961. if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
  1962. dct['downloader_options'] = {
  1963. # Youtube throttles chunks >~10M
  1964. 'http_chunk_size': 10485760,
  1965. }
  1966. formats.append(dct)
  1967. else:
  1968. manifest_url = (
  1969. url_or_none(try_get(
  1970. player_response,
  1971. lambda x: x['streamingData']['hlsManifestUrl'],
  1972. compat_str))
  1973. or url_or_none(try_get(
  1974. video_info, lambda x: x['hlsvp'][0], compat_str)))
  1975. if manifest_url:
  1976. formats = []
  1977. m3u8_formats = self._extract_m3u8_formats(
  1978. manifest_url, video_id, 'mp4', fatal=False)
  1979. for a_format in m3u8_formats:
  1980. itag = self._search_regex(
  1981. r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
  1982. if itag:
  1983. a_format['format_id'] = itag
  1984. if itag in self._formats:
  1985. dct = self._formats[itag].copy()
  1986. dct.update(a_format)
  1987. a_format = dct
  1988. a_format['player_url'] = player_url
  1989. # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
  1990. a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
  1991. if self._downloader.params.get('youtube_include_hls_manifest', True):
  1992. formats.append(a_format)
  1993. else:
  1994. error_message = extract_unavailable_message()
  1995. if not error_message:
  1996. reason_list = try_get(
  1997. player_response,
  1998. lambda x: x['playabilityStatus']['errorScreen']['playerErrorMessageRenderer']['subreason']['runs'],
  1999. list) or []
  2000. for reason in reason_list:
  2001. if not isinstance(reason, dict):
  2002. continue
  2003. reason_text = try_get(reason, lambda x: x['text'], compat_str)
  2004. if reason_text:
  2005. if not error_message:
  2006. error_message = ''
  2007. error_message += reason_text
  2008. if error_message:
  2009. error_message = clean_html(error_message)
  2010. if not error_message:
  2011. error_message = clean_html(try_get(
  2012. player_response, lambda x: x['playabilityStatus']['reason'],
  2013. compat_str))
  2014. if not error_message:
  2015. error_message = clean_html(
  2016. try_get(video_info, lambda x: x['reason'][0], compat_str))
  2017. if error_message:
  2018. raise ExtractorError(error_message, expected=True)
  2019. raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
  2020. # uploader
  2021. video_uploader = try_get(
  2022. video_info, lambda x: x['author'][0],
  2023. compat_str) or str_or_none(video_details.get('author'))
  2024. if video_uploader:
  2025. video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
  2026. else:
  2027. self._downloader.report_warning('unable to extract uploader name')
  2028. # uploader_id
  2029. video_uploader_id = None
  2030. video_uploader_url = None
  2031. mobj = re.search(
  2032. r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
  2033. video_webpage)
  2034. if mobj is not None:
  2035. video_uploader_id = mobj.group('uploader_id')
  2036. video_uploader_url = mobj.group('uploader_url')
  2037. else:
  2038. owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
  2039. if owner_profile_url:
  2040. video_uploader_id = self._search_regex(
  2041. r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
  2042. default=None)
  2043. video_uploader_url = owner_profile_url
  2044. channel_id = (
  2045. str_or_none(video_details.get('channelId'))
  2046. or self._html_search_meta(
  2047. 'channelId', video_webpage, 'channel id', default=None)
  2048. or self._search_regex(
  2049. r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
  2050. video_webpage, 'channel id', default=None, group='id'))
  2051. channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
  2052. thumbnails = []
  2053. thumbnails_list = try_get(
  2054. video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
  2055. for t in thumbnails_list:
  2056. if not isinstance(t, dict):
  2057. continue
  2058. thumbnail_url = url_or_none(t.get('url'))
  2059. if not thumbnail_url:
  2060. continue
  2061. thumbnails.append({
  2062. 'url': thumbnail_url,
  2063. 'width': int_or_none(t.get('width')),
  2064. 'height': int_or_none(t.get('height')),
  2065. })
  2066. if not thumbnails:
  2067. video_thumbnail = None
  2068. # We try first to get a high quality image:
  2069. m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
  2070. video_webpage, re.DOTALL)
  2071. if m_thumb is not None:
  2072. video_thumbnail = m_thumb.group(1)
  2073. thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
  2074. if thumbnail_url:
  2075. video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
  2076. if video_thumbnail:
  2077. thumbnails.append({'url': video_thumbnail})
  2078. # upload date
  2079. upload_date = self._html_search_meta(
  2080. 'datePublished', video_webpage, 'upload date', default=None)
  2081. if not upload_date:
  2082. upload_date = self._search_regex(
  2083. [r'(?s)id="eow-date.*?>(.*?)</span>',
  2084. r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
  2085. video_webpage, 'upload date', default=None)
  2086. if not upload_date:
  2087. upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
  2088. upload_date = unified_strdate(upload_date)
  2089. video_license = self._html_search_regex(
  2090. r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
  2091. video_webpage, 'license', default=None)
  2092. m_music = re.search(
  2093. r'''(?x)
  2094. <h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
  2095. <ul[^>]*>\s*
  2096. <li>(?P<title>.+?)
  2097. by (?P<creator>.+?)
  2098. (?:
  2099. \(.+?\)|
  2100. <a[^>]*
  2101. (?:
  2102. \bhref=["\']/red[^>]*>| # drop possible
  2103. >\s*Listen ad-free with YouTube Red # YouTube Red ad
  2104. )
  2105. .*?
  2106. )?</li
  2107. ''',
  2108. video_webpage)
  2109. if m_music:
  2110. video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
  2111. video_creator = clean_html(m_music.group('creator'))
  2112. else:
  2113. video_alt_title = video_creator = None
def extract_meta(field):
    # Read a "<h4>Field</h4><ul><li>value</li>" metadata row (Song,
    # Artist, Album) from the old watch-page markup; None when absent.
    return self._html_search_regex(
        r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
        video_webpage, field, default=None)
  2118. track = extract_meta('Song')
  2119. artist = extract_meta('Artist')
  2120. album = extract_meta('Album')
  2121. # Youtube Music Auto-generated description
  2122. release_date = release_year = None
  2123. if video_description:
  2124. mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
  2125. if mobj:
  2126. if not track:
  2127. track = mobj.group('track').strip()
  2128. if not artist:
  2129. artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
  2130. if not album:
  2131. album = mobj.group('album'.strip())
  2132. release_year = mobj.group('release_year')
  2133. release_date = mobj.group('release_date')
  2134. if release_date:
  2135. release_date = release_date.replace('-', '')
  2136. if not release_year:
  2137. release_year = int(release_date[:4])
  2138. if release_year:
  2139. release_year = int(release_year)
  2140. yt_initial = self._get_yt_initial_data(video_id, video_webpage)
  2141. if yt_initial:
  2142. music_metadata = self._get_music_metadata_from_yt_initial(yt_initial)
  2143. if len(music_metadata):
  2144. album = music_metadata[0].get('album')
  2145. artist = music_metadata[0].get('artist')
  2146. track = music_metadata[0].get('track')
  2147. m_episode = re.search(
  2148. r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
  2149. video_webpage)
  2150. if m_episode:
  2151. series = unescapeHTML(m_episode.group('series'))
  2152. season_number = int(m_episode.group('season'))
  2153. episode_number = int(m_episode.group('episode'))
  2154. else:
  2155. series = season_number = episode_number = None
  2156. m_cat_container = self._search_regex(
  2157. r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
  2158. video_webpage, 'categories', default=None)
  2159. category = None
  2160. if m_cat_container:
  2161. category = self._html_search_regex(
  2162. r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
  2163. default=None)
  2164. if not category:
  2165. category = try_get(
  2166. microformat, lambda x: x['category'], compat_str)
  2167. video_categories = None if category is None else [category]
  2168. video_tags = [
  2169. unescapeHTML(m.group('content'))
  2170. for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
  2171. if not video_tags:
  2172. video_tags = try_get(video_details, lambda x: x['keywords'], list)
  2173. def _extract_count(count_name):
  2174. return str_to_int(self._search_regex(
  2175. (r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>' % re.escape(count_name),
  2176. r'["\']label["\']\s*:\s*["\']([\d,.]+)\s+%ss["\']' % re.escape(count_name)),
  2177. video_webpage, count_name, default=None))
  2178. like_count = _extract_count('like')
  2179. dislike_count = _extract_count('dislike')
  2180. if view_count is None:
  2181. view_count = str_to_int(self._search_regex(
  2182. r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
  2183. 'view count', default=None))
  2184. average_rating = (
  2185. float_or_none(video_details.get('averageRating'))
  2186. or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
  2187. # subtitles
  2188. video_subtitles = self.extract_subtitles(
  2189. video_id, video_webpage, has_live_chat_replay)
  2190. automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
  2191. video_duration = try_get(
  2192. video_info, lambda x: int_or_none(x['length_seconds'][0]))
  2193. if not video_duration:
  2194. video_duration = int_or_none(video_details.get('lengthSeconds'))
  2195. if not video_duration:
  2196. video_duration = parse_duration(self._html_search_meta(
  2197. 'duration', video_webpage, 'video duration'))
  2198. # Get Subscriber Count of channel
  2199. subscriber_count = parse_count(self._search_regex(
  2200. r'"text":"([\d\.]+\w?) subscribers"',
  2201. video_webpage,
  2202. 'subscriber count',
  2203. default=None
  2204. ))
  2205. # annotations
  2206. video_annotations = None
  2207. if self._downloader.params.get('writeannotations', False):
  2208. xsrf_token = self._search_regex(
  2209. r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
  2210. video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
  2211. invideo_url = try_get(
  2212. player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
  2213. if xsrf_token and invideo_url:
  2214. xsrf_field_name = self._search_regex(
  2215. r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
  2216. video_webpage, 'xsrf field name',
  2217. group='xsrf_field_name', default='session_token')
  2218. video_annotations = self._download_webpage(
  2219. self._proto_relative_url(invideo_url),
  2220. video_id, note='Downloading annotations',
  2221. errnote='Unable to download video annotations', fatal=False,
  2222. data=urlencode_postdata({xsrf_field_name: xsrf_token}))
  2223. chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
  2224. # Look for the DASH manifest
  2225. if self._downloader.params.get('youtube_include_dash_manifest', True):
  2226. dash_mpd_fatal = True
  2227. for mpd_url in dash_mpds:
  2228. dash_formats = {}
  2229. try:
  2230. def decrypt_sig(mobj):
  2231. s = mobj.group(1)
  2232. dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
  2233. return '/signature/%s' % dec_s
  2234. mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
  2235. for df in self._extract_mpd_formats(
  2236. mpd_url, video_id, fatal=dash_mpd_fatal,
  2237. formats_dict=self._formats):
  2238. if not df.get('filesize'):
  2239. df['filesize'] = _extract_filesize(df['url'])
  2240. # Do not overwrite DASH format found in some previous DASH manifest
  2241. if df['format_id'] not in dash_formats:
  2242. dash_formats[df['format_id']] = df
  2243. # Additional DASH manifests may end up in HTTP Error 403 therefore
  2244. # allow them to fail without bug report message if we already have
  2245. # some DASH manifest succeeded. This is temporary workaround to reduce
  2246. # burst of bug reports until we figure out the reason and whether it
  2247. # can be fixed at all.
  2248. dash_mpd_fatal = False
  2249. except (ExtractorError, KeyError) as e:
  2250. self.report_warning(
  2251. 'Skipping DASH manifest: %r' % e, video_id)
  2252. if dash_formats:
  2253. # Remove the formats we found through non-DASH, they
  2254. # contain less info and it can be wrong, because we use
  2255. # fixed values (for example the resolution). See
  2256. # https://github.com/ytdl-org/youtube-dl/issues/5774 for an
  2257. # example.
  2258. formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
  2259. formats.extend(dash_formats.values())
  2260. # Check for malformed aspect ratio
  2261. stretched_m = re.search(
  2262. r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
  2263. video_webpage)
  2264. if stretched_m:
  2265. w = float(stretched_m.group('w'))
  2266. h = float(stretched_m.group('h'))
  2267. # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
  2268. # We will only process correct ratios.
  2269. if w > 0 and h > 0:
  2270. ratio = w / h
  2271. for f in formats:
  2272. if f.get('vcodec') != 'none':
  2273. f['stretched_ratio'] = ratio
  2274. if not formats:
  2275. if 'reason' in video_info:
  2276. if 'The uploader has not made this video available in your country.' in video_info['reason']:
  2277. regions_allowed = self._html_search_meta(
  2278. 'regionsAllowed', video_webpage, default=None)
  2279. countries = regions_allowed.split(',') if regions_allowed else None
  2280. self.raise_geo_restricted(
  2281. msg=video_info['reason'][0], countries=countries)
  2282. reason = video_info['reason'][0]
  2283. if 'Invalid parameters' in reason:
  2284. unavailable_message = extract_unavailable_message()
  2285. if unavailable_message:
  2286. reason = unavailable_message
  2287. raise ExtractorError(
  2288. 'YouTube said: %s' % reason,
  2289. expected=True, video_id=video_id)
  2290. if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']):
  2291. raise ExtractorError('This video is DRM protected.', expected=True)
  2292. self._sort_formats(formats)
  2293. self.mark_watched(video_id, video_info, player_response)
  2294. return {
  2295. 'id': video_id,
  2296. 'uploader': video_uploader,
  2297. 'uploader_id': video_uploader_id,
  2298. 'uploader_url': video_uploader_url,
  2299. 'channel_id': channel_id,
  2300. 'channel_url': channel_url,
  2301. 'upload_date': upload_date,
  2302. 'license': video_license,
  2303. 'creator': video_creator or artist,
  2304. 'title': video_title,
  2305. 'alt_title': video_alt_title or track,
  2306. 'thumbnails': thumbnails,
  2307. 'description': video_description,
  2308. 'categories': video_categories,
  2309. 'tags': video_tags,
  2310. 'subtitles': video_subtitles,
  2311. 'automatic_captions': automatic_captions,
  2312. 'duration': video_duration,
  2313. 'age_limit': 18 if age_gate else 0,
  2314. 'annotations': video_annotations,
  2315. 'chapters': chapters,
  2316. 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
  2317. 'view_count': view_count,
  2318. 'like_count': like_count,
  2319. 'dislike_count': dislike_count,
  2320. 'average_rating': average_rating,
  2321. 'formats': formats,
  2322. 'is_live': is_live,
  2323. 'start_time': start_time,
  2324. 'end_time': end_time,
  2325. 'series': series,
  2326. 'season_number': season_number,
  2327. 'episode_number': episode_number,
  2328. 'track': track,
  2329. 'artist': artist,
  2330. 'album': album,
  2331. 'release_date': release_date,
  2332. 'release_year': release_year,
  2333. 'subscriber_count': subscriber_count,
  2334. }
  2335. class YoutubeTabIE(YoutubeBaseInfoExtractor):
  2336. IE_DESC = 'YouTube.com tab'
  2337. _VALID_URL = r'''(?x)
  2338. https?://
  2339. (?:\w+\.)?
  2340. (?:
  2341. youtube(?:kids)?\.com|
  2342. invidio\.us
  2343. )/
  2344. (?:
  2345. (?:channel|c|user)/|
  2346. (?P<not_channel>
  2347. (?:playlist|watch)\?.*?\blist=
  2348. )|
  2349. (?!(%s)([/#?]|$)) # Direct URLs
  2350. )
  2351. (?P<id>[^/?\#&]+)
  2352. ''' % YoutubeBaseInfoExtractor._RESERVED_NAMES
  2353. IE_NAME = 'youtube:tab'
  2354. _TESTS = [{
  2355. # playlists, multipage
  2356. 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
  2357. 'playlist_mincount': 94,
  2358. 'info_dict': {
  2359. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2360. 'title': 'Игорь Клейнер - Playlists',
  2361. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2362. },
  2363. }, {
  2364. # playlists, multipage, different order
  2365. 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
  2366. 'playlist_mincount': 94,
  2367. 'info_dict': {
  2368. 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
  2369. 'title': 'Игорь Клейнер - Playlists',
  2370. 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
  2371. },
  2372. }, {
  2373. # playlists, singlepage
  2374. 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
  2375. 'playlist_mincount': 4,
  2376. 'info_dict': {
  2377. 'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
  2378. 'title': 'ThirstForScience - Playlists',
  2379. 'description': 'md5:609399d937ea957b0f53cbffb747a14c',
  2380. }
  2381. }, {
  2382. 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
  2383. 'only_matching': True,
  2384. }, {
  2385. # basic, single video playlist
  2386. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2387. 'info_dict': {
  2388. 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2389. 'uploader': 'Sergey M.',
  2390. 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2391. 'title': 'youtube-dl public playlist',
  2392. },
  2393. 'playlist_count': 1,
  2394. }, {
  2395. # empty playlist
  2396. 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2397. 'info_dict': {
  2398. 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
  2399. 'uploader': 'Sergey M.',
  2400. 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
  2401. 'title': 'youtube-dl empty playlist',
  2402. },
  2403. 'playlist_count': 0,
  2404. }, {
  2405. # Home tab
  2406. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
  2407. 'info_dict': {
  2408. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2409. 'title': 'lex will - Home',
  2410. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2411. },
  2412. 'playlist_mincount': 2,
  2413. }, {
  2414. # Videos tab
  2415. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
  2416. 'info_dict': {
  2417. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2418. 'title': 'lex will - Videos',
  2419. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2420. },
  2421. 'playlist_mincount': 975,
  2422. }, {
  2423. # Videos tab, sorted by popular
  2424. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
  2425. 'info_dict': {
  2426. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2427. 'title': 'lex will - Videos',
  2428. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2429. },
  2430. 'playlist_mincount': 199,
  2431. }, {
  2432. # Playlists tab
  2433. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
  2434. 'info_dict': {
  2435. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2436. 'title': 'lex will - Playlists',
  2437. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2438. },
  2439. 'playlist_mincount': 17,
  2440. }, {
  2441. # Community tab
  2442. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
  2443. 'info_dict': {
  2444. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2445. 'title': 'lex will - Community',
  2446. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2447. },
  2448. 'playlist_mincount': 18,
  2449. }, {
  2450. # Channels tab
  2451. 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
  2452. 'info_dict': {
  2453. 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
  2454. 'title': 'lex will - Channels',
  2455. 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
  2456. },
  2457. 'playlist_mincount': 138,
  2458. }, {
  2459. 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2460. 'only_matching': True,
  2461. }, {
  2462. 'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2463. 'only_matching': True,
  2464. }, {
  2465. 'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
  2466. 'only_matching': True,
  2467. }, {
  2468. 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
  2469. 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2470. 'info_dict': {
  2471. 'title': '29C3: Not my department',
  2472. 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
  2473. 'uploader': 'Christiaan008',
  2474. 'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
  2475. },
  2476. 'playlist_count': 96,
  2477. }, {
  2478. 'note': 'Large playlist',
  2479. 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
  2480. 'info_dict': {
  2481. 'title': 'Uploads from Cauchemar',
  2482. 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
  2483. 'uploader': 'Cauchemar',
  2484. 'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
  2485. },
  2486. 'playlist_mincount': 1123,
  2487. }, {
  2488. # even larger playlist, 8832 videos
  2489. 'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
  2490. 'only_matching': True,
  2491. }, {
  2492. 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
  2493. 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
  2494. 'info_dict': {
  2495. 'title': 'Uploads from Interstellar Movie',
  2496. 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
  2497. 'uploader': 'Interstellar Movie',
  2498. 'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
  2499. },
  2500. 'playlist_mincount': 21,
  2501. }, {
  2502. # https://github.com/ytdl-org/youtube-dl/issues/21844
  2503. 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2504. 'info_dict': {
  2505. 'title': 'Data Analysis with Dr Mike Pound',
  2506. 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
  2507. 'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
  2508. 'uploader': 'Computerphile',
  2509. },
  2510. 'playlist_mincount': 11,
  2511. }, {
  2512. 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
  2513. 'only_matching': True,
  2514. }, {
  2515. # Playlist URL that does not actually serve a playlist
  2516. 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
  2517. 'info_dict': {
  2518. 'id': 'FqZTN594JQw',
  2519. 'ext': 'webm',
  2520. 'title': "Smiley's People 01 detective, Adventure Series, Action",
  2521. 'uploader': 'STREEM',
  2522. 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
  2523. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
  2524. 'upload_date': '20150526',
  2525. 'license': 'Standard YouTube License',
  2526. 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
  2527. 'categories': ['People & Blogs'],
  2528. 'tags': list,
  2529. 'view_count': int,
  2530. 'like_count': int,
  2531. 'dislike_count': int,
  2532. },
  2533. 'params': {
  2534. 'skip_download': True,
  2535. },
  2536. 'skip': 'This video is not available.',
  2537. 'add_ie': [YoutubeIE.ie_key()],
  2538. }, {
  2539. 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
  2540. 'only_matching': True,
  2541. }, {
  2542. 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
  2543. 'only_matching': True,
  2544. }, {
  2545. 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
  2546. 'info_dict': {
  2547. 'id': '9Auq9mYxFEE',
  2548. 'ext': 'mp4',
  2549. 'title': 'Watch Sky News live',
  2550. 'uploader': 'Sky News',
  2551. 'uploader_id': 'skynews',
  2552. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
  2553. 'upload_date': '20191102',
  2554. 'description': 'md5:78de4e1c2359d0ea3ed829678e38b662',
  2555. 'categories': ['News & Politics'],
  2556. 'tags': list,
  2557. 'like_count': int,
  2558. 'dislike_count': int,
  2559. },
  2560. 'params': {
  2561. 'skip_download': True,
  2562. },
  2563. }, {
  2564. 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
  2565. 'info_dict': {
  2566. 'id': 'a48o2S1cPoo',
  2567. 'ext': 'mp4',
  2568. 'title': 'The Young Turks - Live Main Show',
  2569. 'uploader': 'The Young Turks',
  2570. 'uploader_id': 'TheYoungTurks',
  2571. 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
  2572. 'upload_date': '20150715',
  2573. 'license': 'Standard YouTube License',
  2574. 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
  2575. 'categories': ['News & Politics'],
  2576. 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
  2577. 'like_count': int,
  2578. 'dislike_count': int,
  2579. },
  2580. 'params': {
  2581. 'skip_download': True,
  2582. },
  2583. 'only_matching': True,
  2584. }, {
  2585. 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
  2586. 'only_matching': True,
  2587. }, {
  2588. 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
  2589. 'only_matching': True,
  2590. },
  2591. # TODO
  2592. # {
  2593. # 'url': 'https://www.youtube.com/TheYoungTurks/live',
  2594. # 'only_matching': True,
  2595. # }
  2596. ]
  2597. def _extract_channel_id(self, webpage):
  2598. channel_id = self._html_search_meta(
  2599. 'channelId', webpage, 'channel id', default=None)
  2600. if channel_id:
  2601. return channel_id
  2602. channel_url = self._html_search_meta(
  2603. ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
  2604. 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
  2605. 'twitter:app:url:googleplay'), webpage, 'channel url')
  2606. return self._search_regex(
  2607. r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
  2608. channel_url, 'channel id')
  2609. @staticmethod
  2610. def _extract_grid_item_renderer(item):
  2611. for item_kind in ('Playlist', 'Video', 'Channel'):
  2612. renderer = item.get('grid%sRenderer' % item_kind)
  2613. if renderer:
  2614. return renderer
  2615. def _extract_video(self, renderer):
  2616. video_id = renderer.get('videoId')
  2617. title = try_get(
  2618. renderer,
  2619. (lambda x: x['title']['runs'][0]['text'],
  2620. lambda x: x['title']['simpleText']), compat_str)
  2621. description = try_get(
  2622. renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
  2623. compat_str)
  2624. duration = parse_duration(try_get(
  2625. renderer, lambda x: x['lengthText']['simpleText'], compat_str))
  2626. view_count_text = try_get(
  2627. renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
  2628. view_count = str_to_int(self._search_regex(
  2629. r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
  2630. 'view count', default=None))
  2631. uploader = try_get(
  2632. renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
  2633. return {
  2634. '_type': 'url_transparent',
  2635. 'ie_key': YoutubeIE.ie_key(),
  2636. 'id': video_id,
  2637. 'url': video_id,
  2638. 'title': title,
  2639. 'description': description,
  2640. 'duration': duration,
  2641. 'view_count': view_count,
  2642. 'uploader': uploader,
  2643. }
  2644. def _grid_entries(self, grid_renderer):
  2645. for item in grid_renderer['items']:
  2646. if not isinstance(item, dict):
  2647. continue
  2648. renderer = self._extract_grid_item_renderer(item)
  2649. if not isinstance(renderer, dict):
  2650. continue
  2651. title = try_get(
  2652. renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  2653. # playlist
  2654. playlist_id = renderer.get('playlistId')
  2655. if playlist_id:
  2656. yield self.url_result(
  2657. 'https://www.youtube.com/playlist?list=%s' % playlist_id,
  2658. ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
  2659. video_title=title)
  2660. # video
  2661. video_id = renderer.get('videoId')
  2662. if video_id:
  2663. yield self._extract_video(renderer)
  2664. # channel
  2665. channel_id = renderer.get('channelId')
  2666. if channel_id:
  2667. title = try_get(
  2668. renderer, lambda x: x['title']['simpleText'], compat_str)
  2669. yield self.url_result(
  2670. 'https://www.youtube.com/channel/%s' % channel_id,
  2671. ie=YoutubeTabIE.ie_key(), video_title=title)
  2672. def _shelf_entries_trimmed(self, shelf_renderer):
  2673. renderer = try_get(
  2674. shelf_renderer, lambda x: x['content']['horizontalListRenderer'], dict)
  2675. if not renderer:
  2676. return
  2677. # TODO: add support for nested playlists so each shelf is processed
  2678. # as separate playlist
  2679. # TODO: this includes only first N items
  2680. for entry in self._grid_entries(renderer):
  2681. yield entry
  2682. def _shelf_entries(self, shelf_renderer):
  2683. ep = try_get(
  2684. shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
  2685. compat_str)
  2686. shelf_url = urljoin('https://www.youtube.com', ep)
  2687. if not shelf_url:
  2688. return
  2689. title = try_get(
  2690. shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
  2691. yield self.url_result(shelf_url, video_title=title)
  2692. def _playlist_entries(self, video_list_renderer):
  2693. for content in video_list_renderer['contents']:
  2694. if not isinstance(content, dict):
  2695. continue
  2696. renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
  2697. if not isinstance(renderer, dict):
  2698. continue
  2699. video_id = renderer.get('videoId')
  2700. if not video_id:
  2701. continue
  2702. yield self._extract_video(renderer)
  2703. def _itemSection_entries(self, item_sect_renderer):
  2704. for content in item_sect_renderer['contents']:
  2705. if not isinstance(content, dict):
  2706. continue
  2707. renderer = content.get('videoRenderer', {})
  2708. if not isinstance(renderer, dict):
  2709. continue
  2710. video_id = renderer.get('videoId')
  2711. if not video_id:
  2712. continue
  2713. yield self._extract_video(renderer)
  2714. def _rich_entries(self, rich_grid_renderer):
  2715. renderer = try_get(
  2716. rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
  2717. video_id = renderer.get('videoId')
  2718. if not video_id:
  2719. return
  2720. yield self._extract_video(renderer)
  2721. def _video_entry(self, video_renderer):
  2722. video_id = video_renderer.get('videoId')
  2723. if video_id:
  2724. return self._extract_video(video_renderer)
  2725. def _post_thread_entries(self, post_thread_renderer):
  2726. post_renderer = try_get(
  2727. post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
  2728. if not post_renderer:
  2729. return
  2730. # video attachment
  2731. video_renderer = try_get(
  2732. post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
  2733. video_id = None
  2734. if video_renderer:
  2735. entry = self._video_entry(video_renderer)
  2736. if entry:
  2737. yield entry
  2738. # inline video links
  2739. runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
  2740. for run in runs:
  2741. if not isinstance(run, dict):
  2742. continue
  2743. ep_url = try_get(
  2744. run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
  2745. if not ep_url:
  2746. continue
  2747. if not YoutubeIE.suitable(ep_url):
  2748. continue
  2749. ep_video_id = YoutubeIE._match_id(ep_url)
  2750. if video_id == ep_video_id:
  2751. continue
  2752. yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
  2753. def _post_thread_continuation_entries(self, post_thread_continuation):
  2754. contents = post_thread_continuation.get('contents')
  2755. if not isinstance(contents, list):
  2756. return
  2757. for content in contents:
  2758. renderer = content.get('backstagePostThreadRenderer')
  2759. if not isinstance(renderer, dict):
  2760. continue
  2761. for entry in self._post_thread_entries(renderer):
  2762. yield entry
  2763. @staticmethod
  2764. def _extract_next_continuation_data(renderer):
  2765. next_continuation = try_get(
  2766. renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
  2767. if not next_continuation:
  2768. return
  2769. continuation = next_continuation.get('continuation')
  2770. if not continuation:
  2771. return
  2772. ctp = next_continuation.get('clickTrackingParams')
  2773. return {
  2774. 'ctoken': continuation,
  2775. 'continuation': continuation,
  2776. 'itct': ctp,
  2777. }
  2778. @classmethod
  2779. def _extract_continuation(cls, renderer):
  2780. next_continuation = cls._extract_next_continuation_data(renderer)
  2781. if next_continuation:
  2782. return next_continuation
  2783. contents = renderer.get('contents')
  2784. if not isinstance(contents, list):
  2785. return
  2786. for content in contents:
  2787. if not isinstance(content, dict):
  2788. continue
  2789. continuation_ep = try_get(
  2790. content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
  2791. dict)
  2792. if not continuation_ep:
  2793. continue
  2794. continuation = try_get(
  2795. continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
  2796. if not continuation:
  2797. continue
  2798. ctp = continuation_ep.get('clickTrackingParams')
  2799. if not ctp:
  2800. continue
  2801. return {
  2802. 'ctoken': continuation,
  2803. 'continuation': continuation,
  2804. 'itct': ctp,
  2805. }
    def _entries(self, tab, identity_token):
        """Yield all entries of a tab, following continuations page by page.

        First yields everything in the initially-rendered tab content, then
        repeatedly calls the browse_ajax endpoint with the continuation
        token found so far until no further continuation is returned.
        """
        def extract_entries(parent_renderer):  # this needs to be called again for continuations to work with feeds
            contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
            for content in contents:
                if not isinstance(content, dict):
                    continue
                is_renderer = try_get(content, lambda x: x['itemSectionRenderer'], dict)
                if not is_renderer:
                    # rich grid item (e.g. channel "Videos" tab): the
                    # continuation lives on the parent, not the item
                    renderer = content.get('richItemRenderer')
                    if renderer:
                        for entry in self._rich_entries(renderer):
                            yield entry
                        continuation_list[0] = self._extract_continuation(parent_renderer)
                    continue
                isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
                for isr_content in isr_contents:
                    if not isinstance(isr_content, dict):
                        continue
                    renderer = isr_content.get('playlistVideoListRenderer')
                    if renderer:
                        for entry in self._playlist_entries(renderer):
                            yield entry
                        continuation_list[0] = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('gridRenderer')
                    if renderer:
                        for entry in self._grid_entries(renderer):
                            yield entry
                        continuation_list[0] = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('shelfRenderer')
                    if renderer:
                        for entry in self._shelf_entries(renderer):
                            yield entry
                        continue
                    renderer = isr_content.get('backstagePostThreadRenderer')
                    if renderer:
                        for entry in self._post_thread_entries(renderer):
                            yield entry
                        continuation_list[0] = self._extract_continuation(renderer)
                        continue
                    renderer = isr_content.get('videoRenderer')
                    if renderer:
                        entry = self._video_entry(renderer)
                        if entry:
                            yield entry
                # fall back to the section's (then the parent's) continuation
                # if none of the inner renderers provided one
                if not continuation_list[0]:
                    continuation_list[0] = self._extract_continuation(is_renderer)
            if not continuation_list[0]:
                continuation_list[0] = self._extract_continuation(parent_renderer)
        # single-element list used as a mutable cell because
        # Python 2 does not support nonlocal
        continuation_list = [None]
        parent_renderer = (
            try_get(tab, lambda x: x['sectionListRenderer'], dict)
            or try_get(tab, lambda x: x['richGridRenderer'], dict) or {})
        for entry in extract_entries(parent_renderer):
            yield entry
        continuation = continuation_list[0]
        headers = {
            'x-youtube-client-name': '1',
            'x-youtube-client-version': '2.20201112.04.01',
        }
        if identity_token:
            headers['x-youtube-identity-token'] = identity_token
        for page_num in itertools.count(1):
            if not continuation:
                break
            browse = self._download_json(
                'https://www.youtube.com/browse_ajax', None,
                'Downloading page %d' % page_num,
                headers=headers, query=continuation, fatal=False)
            if not browse:
                break
            response = try_get(browse, lambda x: x[1]['response'], dict)
            if not response:
                break
            # old-style continuation payloads
            continuation_contents = try_get(
                response, lambda x: x['continuationContents'], dict)
            if continuation_contents:
                continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
                if continuation_renderer:
                    for entry in self._playlist_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('gridContinuation')
                if continuation_renderer:
                    for entry in self._grid_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('itemSectionContinuation')
                if continuation_renderer:
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('sectionListContinuation')  # for feeds
                if continuation_renderer:
                    continuation_list = [None]
                    for entry in extract_entries(continuation_renderer):
                        yield entry
                    continuation = continuation_list[0]
                    continue
            # new-style continuation payloads
            continuation_items = try_get(
                response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
            if continuation_items:
                continuation_item = continuation_items[0]
                if not isinstance(continuation_item, dict):
                    continue
                renderer = continuation_item.get('playlistVideoRenderer') or continuation_item.get('itemSectionRenderer')
                if renderer:
                    video_list_renderer = {'contents': continuation_items}
                    for entry in self._playlist_entries(video_list_renderer):
                        yield entry
                    continuation = self._extract_continuation(video_list_renderer)
                    continue
            break
  2923. @staticmethod
  2924. def _extract_selected_tab(tabs):
  2925. for tab in tabs:
  2926. if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
  2927. return tab['tabRenderer']
  2928. else:
  2929. raise ExtractorError('Unable to find selected tab')
  2930. @staticmethod
  2931. def _extract_uploader(data):
  2932. uploader = {}
  2933. sidebar_renderer = try_get(
  2934. data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
  2935. if sidebar_renderer:
  2936. for item in sidebar_renderer:
  2937. if not isinstance(item, dict):
  2938. continue
  2939. renderer = item.get('playlistSidebarSecondaryInfoRenderer')
  2940. if not isinstance(renderer, dict):
  2941. continue
  2942. owner = try_get(
  2943. renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
  2944. if owner:
  2945. uploader['uploader'] = owner.get('text')
  2946. uploader['uploader_id'] = try_get(
  2947. owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
  2948. uploader['uploader_url'] = urljoin(
  2949. 'https://www.youtube.com/',
  2950. try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
  2951. return uploader
  2952. def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
  2953. selected_tab = self._extract_selected_tab(tabs)
  2954. renderer = try_get(
  2955. data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
  2956. playlist_id = title = description = None
  2957. if renderer:
  2958. channel_title = renderer.get('title') or item_id
  2959. tab_title = selected_tab.get('title')
  2960. title = channel_title or item_id
  2961. if tab_title:
  2962. title += ' - %s' % tab_title
  2963. description = renderer.get('description')
  2964. playlist_id = renderer.get('externalId')
  2965. renderer = try_get(
  2966. data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
  2967. if renderer:
  2968. title = renderer.get('title')
  2969. description = None
  2970. playlist_id = item_id
  2971. if playlist_id is None:
  2972. playlist_id = item_id
  2973. if title is None:
  2974. title = "Youtube " + playlist_id.title()
  2975. playlist = self.playlist_result(
  2976. self._entries(selected_tab['content'], identity_token),
  2977. playlist_id=playlist_id, playlist_title=title,
  2978. playlist_description=description)
  2979. playlist.update(self._extract_uploader(data))
  2980. return playlist
  2981. def _extract_from_playlist(self, item_id, data, playlist):
  2982. title = playlist.get('title') or try_get(
  2983. data, lambda x: x['titleText']['simpleText'], compat_str)
  2984. playlist_id = playlist.get('playlistId') or item_id
  2985. return self.playlist_result(
  2986. self._playlist_entries(playlist), playlist_id=playlist_id,
  2987. playlist_title=title)
    def _real_extract(self, url):
        """Dispatch a tab/playlist/watch URL to the matching extraction path.

        Tries, in order: channel/playlist tab pages, a playlist panel embedded
        in a watch page, and finally plain single-video extraction.
        """
        item_id = self._match_id(url)
        # Normalize every supported host to www.youtube.com.
        url = compat_urlparse.urlunparse(
            compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
        # A bare channel/user URL (no extra path, 'not_channel' group unmatched)
        # is rewritten to its /videos tab, with a warning to the user.
        is_home = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
        if is_home is not None and is_home.group('not_channel') is None and item_id != 'feed':
            self._downloader.report_warning(
                'A channel/user page was given. All the channel\'s videos will be downloaded. '
                'To download only the videos in the home page, add a "/home" to the URL')
            url = '%s/videos%s' % (is_home.group('pre'), is_home.group('post') or '')
        # Handle both video/playlist URLs
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        video_id = qs.get('v', [None])[0]
        playlist_id = qs.get('list', [None])[0]
        if video_id and playlist_id:
            # URL names both a video and a playlist; --no-playlist picks the video.
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
        webpage = self._download_webpage(url, item_id)
        # Identity token is forwarded with continuation requests (see _entries).
        identity_token = self._search_regex(
            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
            'identity token', default=None)
        data = self._extract_yt_initial_data(item_id, webpage)
        tabs = try_get(
            data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
        if tabs:
            return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
        playlist = try_get(
            data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
        if playlist:
            return self._extract_from_playlist(item_id, data, playlist)
        # Fallback to video extraction if no playlist alike page is recognized.
        # First check for the current video then try the v attribute of URL query.
        video_id = try_get(
            data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
            compat_str) or video_id
        if video_id:
            return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
        # Failed to recognize
        raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
    """Match playlist URLs and bare playlist ids and forward them to YoutubeTabIE."""
    IE_DESC = 'YouTube.com playlists'
    _VALID_URL = r'''(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        (?:
                            (?:
                                youtube(?:kids)?\.com|
                                invidio\.us|
                                youtu\.be
                            )
                            /.*?\?.*?\blist=
                        )?
                        (?P<id>%(playlist_id)s)
                     )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': '[OLD]Team Fortress 2 (Class-based LP)',
            'id': 'PLBB231211A4F62143',
            'uploader': 'Wickydoo',
            'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
        },
        'playlist_mincount': 29,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
            'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        },
        'playlist_count': 2,
        'skip': 'This playlist is private',
    }, {
        'note': 'embedded',
        'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA15',
            'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
            'uploader': 'milan',
            'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
        }
    }, {
        'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
        'playlist_mincount': 982,
        'info_dict': {
            'title': '2018 Chinese New Singles (11/6 updated)',
            'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
            'uploader': 'LBK',
            'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
        }
    }, {
        'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
        'info_dict': {
            'id': 'yeWKywCrFtk',
            'ext': 'mp4',
            'title': 'Small Scale Baler and Braiding Rugs',
            'uploader': 'Backus-Page House Museum',
            'uploader_id': 'backuspagemuseum',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
            'upload_date': '20161008',
            'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
            'categories': ['Nonprofits & Activism'],
            'tags': list,
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
    }, {
        'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
        'only_matching': True,
    }, {
        'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
        'only_matching': True,
    }, {
        # music album playlist
        'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Defer to YoutubeTabIE whenever it can handle the URL, so full tab
        # pages are not swallowed by the bare-playlist-id pattern above.
        return False if YoutubeTabIE.suitable(url) else super(
            YoutubePlaylistIE, cls).suitable(url)
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if not qs:
            # Bare playlist id with no query string: synthesize one.
            qs = {'list': playlist_id}
        # Rebuild a canonical playlist URL and let YoutubeTabIE do the work.
        return self.url_result(
            update_url_query('https://www.youtube.com/playlist', qs),
            ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
  3125. class YoutubeYtUserIE(InfoExtractor):
  3126. _VALID_URL = r'ytuser:(?P<id>.+)'
  3127. _TESTS = [{
  3128. 'url': 'ytuser:phihag',
  3129. 'only_matching': True,
  3130. }]
  3131. def _real_extract(self, url):
  3132. user_id = self._match_id(url)
  3133. return self.url_result(
  3134. 'https://www.youtube.com/user/%s' % user_id,
  3135. ie=YoutubeTabIE.ie_key(), video_id=user_id)
  3136. class YoutubeFavouritesIE(InfoExtractor):
  3137. IE_NAME = 'youtube:favorites'
  3138. IE_DESC = 'YouTube.com liked videos, ":ytfav" for short (requires authentication)'
  3139. _VALID_URL = r':ytfav(?:ou?rite)?s?'
  3140. _LOGIN_REQUIRED = True
  3141. _TESTS = [{
  3142. 'url': ':ytfav',
  3143. 'only_matching': True,
  3144. }, {
  3145. 'url': ':ytfavorites',
  3146. 'only_matching': True,
  3147. }]
  3148. def _real_extract(self, url):
  3149. return self.url_result(
  3150. 'https://www.youtube.com/playlist?list=LL',
  3151. ie=YoutubeTabIE.ie_key())
  3152. class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
  3153. IE_DESC = 'YouTube.com searches'
  3154. # there doesn't appear to be a real limit, for example if you search for
  3155. # 'python' you get more than 8.000.000 results
  3156. _MAX_RESULTS = float('inf')
  3157. IE_NAME = 'youtube:search'
  3158. _SEARCH_KEY = 'ytsearch'
  3159. _SEARCH_PARAMS = None
  3160. _TESTS = []
  3161. def _entries(self, query, n):
  3162. data = {
  3163. 'context': {
  3164. 'client': {
  3165. 'clientName': 'WEB',
  3166. 'clientVersion': '2.20201021.03.00',
  3167. }
  3168. },
  3169. 'query': query,
  3170. }
  3171. if self._SEARCH_PARAMS:
  3172. data['params'] = self._SEARCH_PARAMS
  3173. total = 0
  3174. for page_num in itertools.count(1):
  3175. search = self._download_json(
  3176. 'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
  3177. video_id='query "%s"' % query,
  3178. note='Downloading page %s' % page_num,
  3179. errnote='Unable to download API page', fatal=False,
  3180. data=json.dumps(data).encode('utf8'),
  3181. headers={'content-type': 'application/json'})
  3182. if not search:
  3183. break
  3184. slr_contents = try_get(
  3185. search,
  3186. (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
  3187. lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
  3188. list)
  3189. if not slr_contents:
  3190. break
  3191. isr_contents = try_get(
  3192. slr_contents,
  3193. lambda x: x[0]['itemSectionRenderer']['contents'],
  3194. list)
  3195. if not isr_contents:
  3196. break
  3197. for content in isr_contents:
  3198. if not isinstance(content, dict):
  3199. continue
  3200. video = content.get('videoRenderer')
  3201. if not isinstance(video, dict):
  3202. continue
  3203. video_id = video.get('videoId')
  3204. if not video_id:
  3205. continue
  3206. title = try_get(video, lambda x: x['title']['runs'][0]['text'], compat_str)
  3207. description = try_get(video, lambda x: x['descriptionSnippet']['runs'][0]['text'], compat_str)
  3208. duration = parse_duration(try_get(video, lambda x: x['lengthText']['simpleText'], compat_str))
  3209. view_count_text = try_get(video, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
  3210. view_count = int_or_none(self._search_regex(
  3211. r'^(\d+)', re.sub(r'\s', '', view_count_text),
  3212. 'view count', default=None))
  3213. uploader = try_get(video, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
  3214. total += 1
  3215. yield {
  3216. '_type': 'url_transparent',
  3217. 'ie_key': YoutubeIE.ie_key(),
  3218. 'id': video_id,
  3219. 'url': video_id,
  3220. 'title': title,
  3221. 'description': description,
  3222. 'duration': duration,
  3223. 'view_count': view_count,
  3224. 'uploader': uploader,
  3225. }
  3226. if total == n:
  3227. return
  3228. token = try_get(
  3229. slr_contents,
  3230. lambda x: x[1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
  3231. compat_str)
  3232. if not token:
  3233. break
  3234. data['continuation'] = token
  3235. def _get_n_results(self, query, n):
  3236. """Get a specified number of results for a query"""
  3237. return self.playlist_result(self._entries(query, n), query)
class YoutubeSearchDateIE(YoutubeSearchIE):
    # Same search pipeline as YoutubeSearchIE, but results are requested
    # newest-first via the pre-encoded search parameter below.
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    _SEARCH_PARAMS = 'CAI%3D'  # presumably the "sort by upload date" token -- TODO confirm
class YoutubeSearchURLIE(YoutubeSearchIE):
    """Extract search results directly from youtube.com/results URLs."""
    IE_DESC = 'YouTube.com search URLs'
    IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
    # _MAX_RESULTS = 100
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'title': 'youtube-dl test video',
        }
    }, {
        'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
        'only_matching': True,
    }]
    @classmethod
    def _make_valid_url(cls):
        # Use the literal _VALID_URL above instead of the pattern the base
        # search class presumably derives from _SEARCH_KEY -- verify in common.
        return cls._VALID_URL
    def _real_extract(self, url):
        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        # _VALID_URL guarantees at least one of search_query/q is present.
        query = (qs.get('search_query') or qs.get('q'))[0]
        # Pass the raw 'sp' filter token straight through to the API request.
        self._SEARCH_PARAMS = qs.get('sp', ('',))[0]
        return self._get_n_results(query, self._MAX_RESULTS)
  3266. class YoutubeFeedsInfoExtractor(YoutubeTabIE):
  3267. """
  3268. Base class for feed extractors
  3269. Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
  3270. """
  3271. _LOGIN_REQUIRED = True
  3272. # _MAX_PAGES = 5
  3273. _TESTS = []
  3274. @property
  3275. def IE_NAME(self):
  3276. return 'youtube:%s' % self._FEED_NAME
  3277. def _real_initialize(self):
  3278. self._login()
  3279. def _shelf_entries(self, shelf_renderer):
  3280. renderer = try_get(shelf_renderer, lambda x: x['content']['gridRenderer'], dict)
  3281. if not renderer:
  3282. return
  3283. for entry in self._grid_entries(renderer):
  3284. yield entry
  3285. def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
  3286. selected_tab = self._extract_selected_tab(tabs)
  3287. return self.playlist_result(
  3288. self._entries(selected_tab['content'], identity_token),
  3289. playlist_title=self._PLAYLIST_TITLE)
  3290. def _real_extract(self, url):
  3291. item_id = self._FEED_NAME
  3292. url = 'https://www.youtube.com/feed/%s' % self._FEED_NAME
  3293. webpage = self._download_webpage(url, item_id)
  3294. identity_token = self._search_regex(
  3295. r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
  3296. 'identity token', default=None)
  3297. data = self._extract_yt_initial_data(item_id, webpage)
  3298. tabs = try_get(
  3299. data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
  3300. if tabs:
  3301. return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
  3302. # Failed to recognize
  3303. raise ExtractorError('Unable to recognize feed page')
  3304. class YoutubeWatchLaterIE(InfoExtractor):
  3305. IE_NAME = 'youtube:watchlater'
  3306. _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/watch_later|:ytwatchlater|WL'
  3307. IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
  3308. _TESTS = [{
  3309. 'url': 'https://www.youtube.com/feed/watch_later',
  3310. 'only_matching': True,
  3311. }, {
  3312. 'url': ':ytwatchlater',
  3313. 'only_matching': True,
  3314. }]
  3315. def _real_extract(self, url):
  3316. return self.url_result(
  3317. 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    # Home-feed recommendations; login and tab extraction are inherited
    # from YoutubeFeedsInfoExtractor.
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com(?:/feed/recommended|/?[?#]|/?$)|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    # Subscriptions feed; login and tab extraction are inherited
    # from YoutubeFeedsInfoExtractor.
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsub(?:scription)?s?'
    IE_DESC = 'YouTube.com subscriptions feed, ":ytsubs" for short (requires authentication)'
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    # Watch-history feed; login and tab extraction are inherited
    # from YoutubeFeedsInfoExtractor.
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
    """Catch watch/attribution URLs that lost their video id (typically an
    unquoted '&' in the shell) and fail with a helpful message."""
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        # Always an error: these URLs carry no video id at all.
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)
  3378. class YoutubeTruncatedIDIE(InfoExtractor):
  3379. IE_NAME = 'youtube:truncated_id'
  3380. IE_DESC = False # Do not list
  3381. _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
  3382. _TESTS = [{
  3383. 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
  3384. 'only_matching': True,
  3385. }]
  3386. def _real_extract(self, url):
  3387. video_id = self._match_id(url)
  3388. raise ExtractorError(
  3389. 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
  3390. expected=True)
# Do Youtube show urls even exist anymore? I couldn't find any
# NOTE: kept for reference only -- the class below lives inside a raw string
# literal, so it is never defined or registered.
r'''
class YoutubeShowIE(YoutubeTabIE):
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'https://www.youtube.com/show/airdisasters',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        }
    }]
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        return super(YoutubeShowIE, self)._real_extract(
            'https://www.youtube.com/show/%s/playlists' % playlist_id)
'''