# yt-dlc/youtube_dlc/extractor/deezer.py
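# Extractors for Deezer playlist and album pages. As the runtime warning in
# DeezerBaseInfoExtractor.get_data notes, only the 30-second preview streams
# exposed in the page data are extracted.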

from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    orderedSet,
)


class DeezerBaseInfoExtractor(InfoExtractor):
    def get_data(self, url):
        if not self._downloader.params.get('test'):
            self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')

        mobj = re.match(self._VALID_URL, url)
        data_id = mobj.group('id')

        webpage = self._download_webpage(url, data_id)
        geoblocking_msg = self._html_search_regex(
            r'<p class="soon-txt">(.*?)</p>', webpage, 'geoblocking message',
            default=None)
        if geoblocking_msg is not None:
            raise ExtractorError(
                'Deezer said: %s' % geoblocking_msg, expected=True)

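        # The page metadata lives in a JSON blob embedded in the markup
        # (__DZR_APP_STATE__, with the older naboo.display() payload kept as
        # a fallback pattern); everything below is read from that object.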
        data_json = self._search_regex(
            (r'__DZR_APP_STATE__\s*=\s*({.+?})\s*</script>',
             r'naboo\.display\(\'[^\']+\',\s*(.*?)\);\n'),
            webpage, 'data JSON')
        data = json.loads(data_json)
        return data_id, webpage, data


class DeezerPlaylistIE(DeezerBaseInfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?deezer\.com/(../)?playlist/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.deezer.com/playlist/176747451',
        'info_dict': {
            'id': '176747451',
            'title': 'Best!',
            'uploader': 'anonymous',
            'thumbnail': r're:^https?://(e-)?cdns-images\.dzcdn\.net/images/cover/.*\.jpg$',
        },
        'playlist_count': 29,
    }

    def _real_extract(self, url):
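        # get_data() returns the ID, the page HTML and the embedded app-state
        # JSON; 'DATA' carries the playlist metadata, 'SONGS'/'data' the tracks.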
        playlist_id, webpage, data = self.get_data(url)

        playlist_title = data.get('DATA', {}).get('TITLE')
        playlist_uploader = data.get('DATA', {}).get('PARENT_USERNAME')
        playlist_thumbnail = self._search_regex(
            r'<img id="naboo_playlist_image".*?src="([^"]+)"', webpage,
            'playlist thumbnail')

        entries = []
        for s in data.get('SONGS', {}).get('data'):
            formats = [{
                'format_id': 'preview',
                'url': s.get('MEDIA', [{}])[0].get('HREF'),
                'preference': -100,  # Only the first 30 seconds
                'ext': 'mp3',
            }]
            self._sort_formats(formats)
            artists = ', '.join(
                orderedSet(a.get('ART_NAME') for a in s.get('ARTISTS')))
            entries.append({
                'id': s.get('SNG_ID'),
                'duration': int_or_none(s.get('DURATION')),
                'title': '%s - %s' % (artists, s.get('SNG_TITLE')),
                'uploader': s.get('ART_NAME'),
                'uploader_id': s.get('ART_ID'),
                'age_limit': 16 if s.get('EXPLICIT_LYRICS') == '1' else 0,
                'formats': formats,
            })

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': playlist_title,
            'uploader': playlist_uploader,
            'thumbnail': playlist_thumbnail,
            'entries': entries,
        }


class DeezerAlbumIE(DeezerBaseInfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?deezer\.com/(../)?album/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://www.deezer.com/fr/album/67505622',
        'info_dict': {
            'id': '67505622',
            'title': 'Last Week',
            'uploader': 'Home Brew',
            'thumbnail': r're:^https?://(e-)?cdns-images\.dzcdn\.net/images/cover/.*\.jpg$',
        },
        'playlist_count': 7,
    }

    def _real_extract(self, url):
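        # Same page layout as the playlist pages: 'DATA' describes the album,
        # 'SONGS'/'data' lists the tracks, and the per-track album/track_number
        # fields are filled in from the same objects.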
        album_id, webpage, data = self.get_data(url)

        album_title = data.get('DATA', {}).get('ALB_TITLE')
        album_uploader = data.get('DATA', {}).get('ART_NAME')
        album_thumbnail = self._search_regex(
            r'<img id="naboo_album_image".*?src="([^"]+)"', webpage,
            'album thumbnail')

        entries = []
        for s in data.get('SONGS', {}).get('data'):
            formats = [{
                'format_id': 'preview',
                'url': s.get('MEDIA', [{}])[0].get('HREF'),
                'preference': -100,  # Only the first 30 seconds
                'ext': 'mp3',
            }]
            self._sort_formats(formats)
            artists = ', '.join(
                orderedSet(a.get('ART_NAME') for a in s.get('ARTISTS')))
            entries.append({
                'id': s.get('SNG_ID'),
                'duration': int_or_none(s.get('DURATION')),
                'title': '%s - %s' % (artists, s.get('SNG_TITLE')),
                'uploader': s.get('ART_NAME'),
                'uploader_id': s.get('ART_ID'),
                'age_limit': 16 if s.get('EXPLICIT_LYRICS') == '1' else 0,
                'formats': formats,
                'track': s.get('SNG_TITLE'),
                'track_number': int_or_none(s.get('TRACK_NUMBER')),
                'track_id': s.get('SNG_ID'),
                'artist': album_uploader,
                'album': album_title,
                'album_artist': album_uploader,
            })

        return {
            '_type': 'playlist',
            'id': album_id,
            'title': album_title,
            'uploader': album_uploader,
            'thumbnail': album_thumbnail,
            'entries': entries,
        }
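

# Minimal usage sketch (an assumption about typical embedding, not part of the
# extractor itself): these IEs are normally driven through YoutubeDL rather
# than instantiated directly.
#
#     from youtube_dlc import YoutubeDL
#
#     with YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info('https://www.deezer.com/playlist/176747451')
#         print(info['title'], len(info['entries']))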