Revert changes to vlive

pukkandan 2020-11-22 03:46:38 +05:30
parent 8bdd16b499
commit d3260f40cb
1 changed file with 171 additions and 108 deletions


@@ -4,48 +4,52 @@ from __future__ import unicode_literals
 import re
 import time
 import itertools
-import json
 
 from .common import InfoExtractor
 from .naver import NaverBaseIE
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-)
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
-    int_or_none,
     merge_dicts,
     try_get,
     urlencode_postdata,
 )


-class VLiveBaseIE(NaverBaseIE):
-    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'
-
-
-class VLiveIE(VLiveBaseIE):
+class VLiveIE(NaverBaseIE):
     IE_NAME = 'vlive'
-    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/(?:video|embed)/(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/(?:video|post)/(?P<id>(?:\d-)?[0-9]+)'
     _NETRC_MACHINE = 'vlive'
     _TESTS = [{
-        'url': 'http://www.vlive.tv/video/1326',
+        'url': 'https://www.vlive.tv/video/1326',
         'md5': 'cc7314812855ce56de70a06a27314983',
         'info_dict': {
             'id': '1326',
             'ext': 'mp4',
-            'title': "Girl's Day's Broadcast",
+            'title': "[V LIVE] Girl's Day's Broadcast",
             'creator': "Girl's Day",
             'view_count': int,
             'uploader_id': 'muploader_a',
         },
-    }, {
-        'url': 'http://www.vlive.tv/video/16937',
+    },
+    {
+        'url': 'https://vlive.tv/post/1-18244258',
+        'md5': 'cc7314812855ce56de70a06a27314983',
+        'info_dict': {
+            'id': '1326',
+            'ext': 'mp4',
+            'title': "[V LIVE] Girl's Day's Broadcast",
+            'creator': "Girl's Day",
+            'view_count': int,
+            'uploader_id': 'muploader_a',
+        },
+    },
+    {
+        'url': 'https://www.vlive.tv/video/16937',
         'info_dict': {
             'id': '16937',
             'ext': 'mp4',
-            'title': '첸백시 걍방',
+            'title': '[V LIVE] 첸백시 걍방',
             'creator': 'EXO',
             'view_count': int,
             'subtitles': 'mincount:12',
@@ -66,11 +70,12 @@ class VLiveIE(VLiveBaseIE):
             'subtitles': 'mincount:10',
         },
         'skip': 'This video is only available for CH+ subscribers',
-    }, {
-        'url': 'https://www.vlive.tv/embed/1326',
-        'only_matching': True,
     }]

+    @classmethod
+    def suitable(cls, url):
+        return False if VLivePlaylistIE.suitable(url) else super(VLiveIE, cls).suitable(url)
+
     def _real_initialize(self):
         self._login()
@@ -102,82 +107,118 @@ class VLiveIE(VLiveBaseIE):
         if not is_logged_in():
             raise ExtractorError('Unable to log in', expected=True)

-    def _call_api(self, path_template, video_id, fields=None):
-        query = {'appId': self._APP_ID}
-        if fields:
-            query['fields'] = fields
-        return self._download_json(
-            'https://www.vlive.tv/globalv-web/vam-web/' + path_template % video_id, video_id,
-            'Downloading %s JSON metadata' % path_template.split('/')[-1].split('-')[0],
-            headers={'Referer': 'https://www.vlive.tv/'}, query=query)
-
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-
-        try:
-            post = self._call_api(
-                'post/v1.0/officialVideoPost-%s', video_id,
-                'author{nickname},channel{channelCode,channelName},officialVideo{commentCount,exposeStatus,likeCount,playCount,playTime,status,title,type,vodId}')
-        except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
-                self.raise_login_required(json.loads(e.cause.read().decode())['message'])
-            raise
-
-        video = post['officialVideo']
-
-        def get_common_fields():
-            channel = post.get('channel') or {}
-            return {
-                'title': video.get('title'),
-                'creator': post.get('author', {}).get('nickname'),
-                'channel': channel.get('channelName'),
-                'channel_id': channel.get('channelCode'),
-                'duration': int_or_none(video.get('playTime')),
-                'view_count': int_or_none(video.get('playCount')),
-                'like_count': int_or_none(video.get('likeCount')),
-                'comment_count': int_or_none(video.get('commentCount')),
-            }
-
-        video_type = video.get('type')
-        if video_type == 'VOD':
-            inkey = self._call_api('video/v1.0/vod/%s/inkey', video_id)['inkey']
-            vod_id = video['vodId']
-            return merge_dicts(
-                get_common_fields(),
-                self._extract_video_info(video_id, vod_id, inkey))
-        elif video_type == 'LIVE':
-            status = video.get('status')
-            if status == 'ON_AIR':
-                stream_url = self._call_api(
-                    'old/v3/live/%s/playInfo',
-                    video_id)['result']['adaptiveStreamUrl']
-                formats = self._extract_m3u8_formats(stream_url, video_id, 'mp4')
-                info = get_common_fields()
-                info.update({
-                    'title': self._live_title(video['title']),
-                    'id': video_id,
-                    'formats': formats,
-                    'is_live': True,
-                })
-                return info
-            elif status == 'ENDED':
-                raise ExtractorError(
-                    'Uploading for replay. Please wait...', expected=True)
-            elif status == 'RESERVED':
-                raise ExtractorError('Coming soon!', expected=True)
-            elif video.get('exposeStatus') == 'CANCEL':
-                raise ExtractorError(
-                    'We are sorry, but the live broadcast has been canceled.',
-                    expected=True)
-            else:
-                raise ExtractorError('Unknown status ' + status)
+        # url may match on a post or a video url with a post_id potentially matching a video_id
+        working_id = self._match_id(url)
+        webpage = self._download_webpage(url, working_id)
+
+        PARAMS_RE = r'window\.__PRELOADED_STATE__\s*=\s*({.*});?\s*</script>'
+        PARAMS_FIELD = 'params'
+
+        params = self._search_regex(
+            PARAMS_RE, webpage, PARAMS_FIELD, default='', flags=re.DOTALL)
+        params = self._parse_json(params, working_id, fatal=False)
+
+        video_params = try_get(params, lambda x: x["postDetail"]["post"]["officialVideo"], dict)
+
+        if video_params is None:
+            error = try_get(params, lambda x: x["postDetail"]["error"], dict)
+            error_data = try_get(error, lambda x: x["data"], dict)
+            error_video = try_get(error_data, lambda x: x["officialVideo"], dict)
+            error_msg = try_get(error, lambda x: x["message"], compat_str)
+            product_type = try_get(error_data,
+                                   [lambda x: x["officialVideo"]["productType"],
+                                    lambda x: x["board"]["boardType"]],
+                                   compat_str)
+            if error_video is not None:
+                if product_type in ('VLIVE_PLUS', 'VLIVE+'):
+                    self.raise_login_required('This video is only available with V LIVE+.')
+                elif error_msg is not None:
+                    raise ExtractorError('V LIVE reported the following error: %s' % error_msg)
+                else:
+                    raise ExtractorError('Failed to extract video parameters.')
+            elif 'post' in url:
+                raise ExtractorError('Url does not appear to be a video post.', expected=True)
+            else:
+                raise ExtractorError('Failed to extract video parameters.')
+
+        video_id = working_id if 'video' in url else str(video_params["videoSeq"])
+
+        video_type = video_params["type"]
+        if video_type in ('VOD'):
+            encoding_status = video_params["encodingStatus"]
+            if encoding_status == 'COMPLETE':
+                return self._replay(video_id, webpage, params, video_params)
+            else:
+                raise ExtractorError('VOD encoding not yet complete. Please try again later.',
+                                     expected=True)
+        elif video_type in ('LIVE'):
+            video_status = video_params["status"]
+            if video_status in ('RESERVED'):
+                raise ExtractorError('Coming soon!', expected=True)
+            elif video_status in ('ENDED', 'END'):
+                raise ExtractorError('Uploading for replay. Please wait...', expected=True)
+            else:
+                return self._live(video_id, webpage, params)
+        else:
+            raise ExtractorError('Unknown video type %s' % video_type)
+
+    def _get_common_fields(self, webpage, params):
+        title = self._og_search_title(webpage)
+        description = self._html_search_meta(
+            ['og:description', 'description', 'twitter:description'],
+            webpage, 'description', default=None)
+        creator = (try_get(params, lambda x: x["channel"]["channel"]["channelName"], compat_str)
+                   or self._search_regex(r'on (.*) channel', description or '', 'creator', fatal=False))
+        thumbnail = self._og_search_thumbnail(webpage)
+        return {
+            'title': title,
+            'creator': creator,
+            'thumbnail': thumbnail,
+        }
+
+    def _live(self, video_id, webpage, params):
+        LIVE_INFO_ENDPOINT = 'https://www.vlive.tv/globalv-web/vam-web/old/v3/live/%s/playInfo' % video_id
+        play_info = self._download_json(LIVE_INFO_ENDPOINT, video_id,
+                                        headers={"referer": "https://www.vlive.tv"})
+
+        streams = try_get(play_info, lambda x: x["result"]["streamList"], list) or []
+
+        formats = []
+        for stream in streams:
+            formats.extend(self._extract_m3u8_formats(
+                stream['serviceUrl'], video_id, 'mp4',
+                fatal=False, live=True))
+        self._sort_formats(formats)
+
+        info = self._get_common_fields(webpage, params)
+        info.update({
+            'title': self._live_title(info['title']),
+            'id': video_id,
+            'formats': formats,
+            'is_live': True,
+        })
+        return info
+
+    def _replay(self, video_id, webpage, params, video_params):
+        long_video_id = video_params["vodId"]
+
+        VOD_KEY_ENDPOINT = 'https://www.vlive.tv/globalv-web/vam-web/video/v1.0/vod/%s/inkey' % video_id
+        key_json = self._download_json(VOD_KEY_ENDPOINT, video_id,
+                                       headers={"referer": "https://www.vlive.tv"})
+        key = key_json["inkey"]
+
+        return merge_dicts(
+            self._get_common_fields(webpage, params),
+            self._extract_video_info(video_id, long_video_id, key))


-class VLiveChannelIE(VLiveBaseIE):
+class VLiveChannelIE(InfoExtractor):
     IE_NAME = 'vlive:channel'
-    _VALID_URL = r'https?://(?:channels\.vlive\.tv|(?:(?:www|m)\.)?vlive\.tv/channel)/(?P<id>[0-9A-Z]+)'
+    _VALID_URL = r'https?://(?:(?:www|m)\.)?(?:channels\.vlive\.tv/|vlive\.tv/channels?/)(?P<id>[0-9A-Z]+)'
     _TESTS = [{
-        'url': 'http://channels.vlive.tv/FCD4B',
+        'url': 'https://channels.vlive.tv/FCD4B',
         'info_dict': {
             'id': 'FCD4B',
             'title': 'MAMAMOO',
@@ -185,39 +226,63 @@ class VLiveChannelIE(VLiveBaseIE):
         'playlist_mincount': 110
     }, {
         'url': 'https://www.vlive.tv/channel/FCD4B',
-        'only_matching': True,
+        'info_dict': {
+            'id': 'FCD4B',
+            'title': 'MAMAMOO',
+        },
+        'playlist_mincount': 110
     }]

-    def _call_api(self, path, channel_key_suffix, channel_value, note, query):
-        q = {
-            'app_id': self._APP_ID,
-            'channel' + channel_key_suffix: channel_value,
-        }
-        q.update(query)
-        return self._download_json(
-            'http://api.vfan.vlive.tv/vproxy/channelplus/' + path,
-            channel_value, note='Downloading ' + note, query=q)['result']
+    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'

     def _real_extract(self, url):
         channel_code = self._match_id(url)

-        channel_seq = self._call_api(
-            'decodeChannelCode', 'Code', channel_code,
-            'decode channel code', {})['channelSeq']
+        webpage = self._download_webpage(
+            'http://channels.vlive.tv/%s/video' % channel_code, channel_code)
+
+        app_id = None
+
+        app_js_url = self._search_regex(
+            r'<script[^>]+src=(["\'])(?P<url>http.+?/app\.js.*?)\1',
+            webpage, 'app js', default=None, group='url')
+
+        if app_js_url:
+            app_js = self._download_webpage(
+                app_js_url, channel_code, 'Downloading app JS', fatal=False)
+            if app_js:
+                app_id = self._search_regex(
+                    r'Global\.VFAN_APP_ID\s*=\s*[\'"]([^\'"]+)[\'"]',
+                    app_js, 'app id', default=None)
+
+        app_id = app_id or self._APP_ID
+
+        channel_info = self._download_json(
+            'http://api.vfan.vlive.tv/vproxy/channelplus/decodeChannelCode',
+            channel_code, note='Downloading decode channel code',
+            query={
+                'app_id': app_id,
+                'channelCode': channel_code,
+                '_': int(time.time())
+            })
+
+        channel_seq = channel_info['result']['channelSeq']
         channel_name = None
         entries = []

         for page_num in itertools.count(1):
-            video_list = self._call_api(
-                'getChannelVideoList', 'Seq', channel_seq,
-                'channel list page #%d' % page_num, {
+            video_list = self._download_json(
+                'http://api.vfan.vlive.tv/vproxy/channelplus/getChannelVideoList',
+                channel_code, note='Downloading channel list page #%d' % page_num,
+                query={
+                    'app_id': app_id,
+                    'channelSeq': channel_seq,
                     # Large values of maxNumOfRows (~300 or above) may cause
                     # empty responses (see [1]), e.g. this happens for [2] that
                     # has more than 300 videos.
                     # 1. https://github.com/ytdl-org/youtube-dl/issues/13830
                     # 2. http://channels.vlive.tv/EDBF.
                     'maxNumOfRows': 100,
+                    '_': int(time.time()),
                     'pageNo': page_num
                 }
             )
@@ -225,11 +290,11 @@ class VLiveChannelIE(VLiveBaseIE):
             if not channel_name:
                 channel_name = try_get(
                     video_list,
-                    lambda x: x['channelInfo']['channelName'],
+                    lambda x: x['result']['channelInfo']['channelName'],
                     compat_str)

             videos = try_get(
-                video_list, lambda x: x['videoList'], list)
+                video_list, lambda x: x['result']['videoList'], list)
             if not videos:
                 break
@@ -247,9 +312,7 @@ class VLiveChannelIE(VLiveBaseIE):
             entries, channel_code, channel_name)


-# old extractor. Rewrite?
-class VLivePlaylistIE(VLiveBaseIE):
+class VLivePlaylistIE(InfoExtractor):
     IE_NAME = 'vlive:playlist'
     _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<video_id>[0-9]+)/playlist/(?P<id>[0-9]+)'
     _VIDEO_URL_TEMPLATE = 'http://www.vlive.tv/video/%s'