Fix YahooSearchIE: (closes #300)

Jaime Marquínez Ferrándiz 2013-05-12 17:49:35 +02:00
parent 2f58b12dad
commit 5a853e1423
1 changed file with 29 additions and 49 deletions
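The yvsearch prefix in _VALID_URL decides how many results the extractor returns: an empty prefix means a single result, 'all' means the 1000-result cap, and a number is used as given (capped at the maximum). Below is a minimal sketch of that dispatch, assuming the pattern from the diff that follows; the requested_results helper is invented here for illustration and is not part of the extractor.

# Sketch only: mirrors the prefix handling in _real_extract, not the commit's code.
import re

_VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
_max_yahoo_results = 1000

def requested_results(query):
    """Return how many results a yvsearch query asks for (illustrative helper)."""
    mobj = re.match(_VALID_URL, query)
    if mobj is None:
        raise ValueError('invalid search query: %s' % query)
    prefix = mobj.group(1)
    if prefix is None:           # "yvsearch:cats"    -> single result
        return 1
    if prefix == 'all':          # "yvsearchall:cats" -> capped at 1000
        return _max_yahoo_results
    return min(int(prefix), _max_yahoo_results)  # "yvsearch20:cats" -> 20

print(requested_results('yvsearch20:cute cats'))  # 20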

@@ -1456,18 +1456,10 @@ class GoogleSearchIE(InfoExtractor):
 class YahooSearchIE(InfoExtractor):
     """Information Extractor for Yahoo! Video search queries."""
 
-    _WORKING = False
     _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
-    _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
-    _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
-    _MORE_PAGES_INDICATOR = r'\s*Next'
-    _max_yahoo_results = 1000
-    IE_NAME = u'video.yahoo:search'
 
-    def report_download_page(self, query, pagenum):
-        """Report attempt to download playlist page with given number."""
-        query = query.decode(preferredencoding())
-        self.to_screen(u'query "%s": Downloading page %s' % (query, pagenum))
+    _max_yahoo_results = 1000
+    IE_NAME = u'screen.yahoo:search'
 
     def _real_extract(self, query):
         mobj = re.match(self._VALID_URL, query)
@@ -1478,11 +1470,9 @@ class YahooSearchIE(InfoExtractor):
         prefix = prefix[8:]
         query = query.encode('utf-8')
         if prefix == '':
-            self._download_n_results(query, 1)
-            return
+            return self._get_n_results(query, 1)
         elif prefix == 'all':
-            self._download_n_results(query, self._max_yahoo_results)
-            return
+            return self._get_n_results(query, self._max_yahoo_results)
         else:
             try:
                 n = int(prefix)
@@ -1491,46 +1481,36 @@ class YahooSearchIE(InfoExtractor):
                 elif n > self._max_yahoo_results:
                     self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
                     n = self._max_yahoo_results
-                self._download_n_results(query, n)
-                return
+                return self._get_n_results(query, n)
             except ValueError: # parsing prefix as integer fails
-                self._download_n_results(query, 1)
-                return
+                return self._get_n_results(query, 1)
 
-    def _download_n_results(self, query, n):
-        """Downloads a specified number of results for a query"""
-        video_ids = []
-        already_seen = set()
-        pagenum = 1
-
-        while True:
-            self.report_download_page(query, pagenum)
-            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum)
-            request = compat_urllib_request.Request(result_url)
-            try:
-                page = compat_urllib_request.urlopen(request).read()
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                raise ExtractorError(u'Unable to download webpage: %s' % compat_str(err))
-
-            # Extract video identifiers
-            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-                video_id = mobj.group(1)
-                if video_id not in already_seen:
-                    video_ids.append(video_id)
-                    already_seen.add(video_id)
-                    if len(video_ids) == n:
-                        # Specified n videos reached
-                        for id in video_ids:
-                            self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
-                        return
-
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-                for id in video_ids:
-                    self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
-                return
-
-            pagenum = pagenum + 1
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
+        res = {
+            '_type': 'playlist',
+            'id': query,
+            'entries': []
+        }
+        for pagenum in itertools.count(0):
+            result_url = u'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
+            webpage = self._download_webpage(result_url, query,
+                                             note='Downloading results page '+str(pagenum+1))
+            info = json.loads(webpage)
+            m = info[u'm']
+            results = info[u'results']
+            for (i, r) in enumerate(results):
+                if (pagenum * 30) +i >= n:
+                    break
+                mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
+                e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
+                res['entries'].append(e)
+            if (pagenum * 30 +i >= n) or (m[u'last'] >= (m[u'total'] -1 )):
+                break
+        return res
 
 
 class YoutubePlaylistIE(InfoExtractor):
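
The rewritten _get_n_results pages through the screen.yahoo JSON search API 30 results at a time and returns a playlist dict, stopping once n entries have been collected or the response metadata (m['last'], m['total']) reports that the last result was served. A minimal standalone sketch of that pagination and stop condition follows; collect_entries and the pages fixture are invented here for illustration, whereas the real method downloads each page with self._download_webpage and wraps each hit via self.url_result.

import itertools

def collect_entries(pages, n):
    """Collect up to n entries from paged results, mirroring the stop
    condition in _get_n_results: stop once n entries are gathered or the
    page metadata says the last available result has been served."""
    entries = []
    for pagenum in itertools.count(0):
        info = pages[pagenum]        # stands in for json.loads(webpage)
        m = info['m']
        results = info['results']
        i = 0
        for (i, r) in enumerate(results):
            if (pagenum * 30) + i >= n:
                break
            entries.append(r)
        if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)):
            break
    return entries

# Two invented pages of 30 results each; ask for 35 entries.
pages = [
    {'m': {'last': 29, 'total': 60}, 'results': ['r%d' % k for k in range(30)]},
    {'m': {'last': 59, 'total': 60}, 'results': ['r%d' % k for k in range(30, 60)]},
]
print(len(collect_entries(pages, 35)))  # 35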