yt-dlc/youtube_dlc/extractor/googlesearch.py

from __future__ import unicode_literals

import itertools
import re

from .common import SearchInfoExtractor


class GoogleSearchIE(SearchInfoExtractor):
    IE_DESC = 'Google Video search'
    _MAX_RESULTS = 1000
    IE_NAME = 'video.google:search'
    # SearchInfoExtractor builds the 'gvsearch<N>:<query>' URL scheme from
    # _SEARCH_KEY and caps the number of results at _MAX_RESULTS.
    _SEARCH_KEY = 'gvsearch'
    _TEST = {
        'url': 'gvsearch15:python language',
        'info_dict': {
            'id': 'python language',
            'title': 'python language',
        },
        'playlist_count': 15,
    }
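
    # Called by SearchInfoExtractor with the parsed query and the number of
    # results requested via the 'gvsearch<N>:' prefix.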
    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        entries = []
        res = {
            '_type': 'playlist',
            'id': query,
            'title': query,
        }
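
        # Page through Google's video search vertical ('tbm=vid'),
        # ten results per page, until enough entries are collected.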
        for pagenum in itertools.count():
            webpage = self._download_webpage(
                'http://www.google.com/search',
                'gvsearch:' + query,
                note='Downloading result page %s' % (pagenum + 1),
                query={
                    'tbm': 'vid',
                    'q': query,
                    'start': pagenum * 10,
                    'hl': 'en',
                })
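
            # Each result title link is rendered as <h3 class="r"><a href="...">
            # in the result page markup.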
            for hit_idx, mobj in enumerate(re.finditer(
                    r'<h3 class="r"><a href="([^"]+)"', webpage)):

                # Skip playlists
                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
                    continue

                entries.append({
                    '_type': 'url',
                    'url': mobj.group(1)
                })
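
            # Stop once enough entries are collected or the "Next" link
            # (id="pnnext") is absent from the page.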
            if (len(entries) >= n) or not re.search(r'id="pnnext"', webpage):
                res['entries'] = entries[:n]
                return res