mirror of
				https://github.com/yt-dlp/yt-dlp.git
				synced 2025-11-04 08:30:46 +00:00 
			
		
		
		
	[cleanup] Delete unused extractors
This commit is contained in:
		@@ -1,84 +0,0 @@
 | 
			
		||||
import json
 | 
			
		||||
 | 
			
		||||
from .common import InfoExtractor
 | 
			
		||||
from ..utils import (
 | 
			
		||||
    remove_start,
 | 
			
		||||
    int_or_none,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BlinkxIE(InfoExtractor):
    """Extractor for blinkx.com video pages and `blinkx:` pseudo-URLs."""

    _VALID_URL = r'(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
    IE_NAME = 'blinkx'

    _TEST = {
        'url': 'http://www.blinkx.com/ce/Da0Gw3xc5ucpNduzLuDDlv4WC9PuI4fDi1-t6Y3LyfdY2SZS5Urbvn-UPJvrvbo8LTKTc67Wu2rPKSQDJyZeeORCR8bYkhs8lI7eqddznH2ofh5WEEdjYXnoRtj7ByQwt7atMErmXIeYKPsSDuMAAqJDlQZ-3Ff4HJVeH_s3Gh8oQ',
        'md5': '337cf7a344663ec79bf93a526a2e06c7',
        'info_dict': {
            'id': 'Da0Gw3xc',
            'ext': 'mp4',
            'title': 'No Daily Show for John Oliver; HBO Show Renewed - IGN News',
            'uploader': 'IGN News',
            'upload_date': '20150217',
            'timestamp': 1424215740,
            'description': 'HBO has renewed Last Week Tonight With John Oliver for two more seasons.',
            'duration': 47.743333,
        },
    }

    def _real_extract(self, url):
        """Query the blinkx play_video API and build the info dict."""
        video_id = self._match_id(url)
        # The short id used for display/logging is the first 8 chars of the
        # full opaque video id.
        display_id = video_id[:8]

        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&'
                   + 'video=%s' % video_id)
        data_json = self._download_webpage(api_url, display_id)
        data = json.loads(data_json)['api']['results'][0]
        duration = None
        thumbnails = []
        formats = []
        # Each entry in data['media'] is typed; images, the 'original' entry
        # (carries duration), YouTube redirects, and playable formats are
        # handled separately.
        for m in data['media']:
            if m['type'] == 'jpg':
                thumbnails.append({
                    'url': m['link'],
                    'width': int(m['w']),
                    'height': int(m['h']),
                })
            elif m['type'] == 'original':
                # Duration (seconds) is only present on the 'original' entry.
                duration = float(m['d'])
            elif m['type'] == 'youtube':
                # Some videos are plain YouTube embeds: delegate to the
                # YouTube extractor instead of extracting here.
                yt_id = m['link']
                self.to_screen('Youtube video detected: %s' % yt_id)
                return self.url_result(yt_id, 'Youtube', video_id=yt_id)
            elif m['type'] in ('flv', 'mp4'):
                # Codec names come ffmpeg-prefixed ('ffh264' etc.); strip 'ff'.
                vcodec = remove_start(m['vcodec'], 'ff')
                acodec = remove_start(m['acodec'], 'ff')
                # Bitrates are reported in bit/s under either of two keys;
                # scale down to kbit/s.
                vbr = int_or_none(m.get('vbr') or m.get('vbitrate'), 1000)
                abr = int_or_none(m.get('abr') or m.get('abitrate'), 1000)
                tbr = vbr + abr if vbr and abr else None
                format_id = '%s-%sk-%s' % (vcodec, tbr, m['w'])
                formats.append({
                    'format_id': format_id,
                    'url': m['link'],
                    'vcodec': vcodec,
                    'acodec': acodec,
                    'abr': abr,
                    'vbr': vbr,
                    'tbr': tbr,
                    'width': int_or_none(m.get('w')),
                    'height': int_or_none(m.get('h')),
                })

        self._sort_formats(formats)

        return {
            'id': display_id,
            'fullid': video_id,
            'title': data['title'],
            'formats': formats,
            'uploader': data.get('channel_name'),
            'timestamp': data.get('pubdate_epoch'),
            'description': data.get('description'),
            'thumbnails': thumbnails,
            'duration': duration,
        }
 | 
			
		||||
@@ -1,56 +0,0 @@
 | 
			
		||||
from .common import InfoExtractor
 | 
			
		||||
from ..utils import parse_duration
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class DiscoveryVRIE(InfoExtractor):
    """Extractor for discoveryvr.com watch pages."""

    _VALID_URL = r'https?://(?:www\.)?discoveryvr\.com/watch/(?P<id>[^/?#]+)'
    _TEST = {
        'url': 'http://www.discoveryvr.com/watch/discovery-vr-an-introduction',
        'md5': '32b1929798c464a54356378b7912eca4',
        'info_dict': {
            'id': 'discovery-vr-an-introduction',
            'ext': 'mp4',
            'title': 'Discovery VR - An Introduction',
            'description': 'md5:80d418a10efb8899d9403e61d8790f06',
        }
    }

    def _real_extract(self, url):
        """Locate the page's embedded bootstrap JSON and extract the video."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The page embeds its data as a JS string assignment:
        #   root.DVR.bootstrapData = "{...}";
        bootstrap_data = self._search_regex(
            r'root\.DVR\.bootstrapData\s+=\s+"({.+?})";',
            webpage, 'bootstrap data')
        # The captured string is backslash-escaped JSON; unescape before parsing.
        bootstrap_data = self._parse_json(
            bootstrap_data.encode('utf-8').decode('unicode_escape'),
            display_id)
        # 'videos' is itself a JSON string nested inside the bootstrap data.
        videos = self._parse_json(bootstrap_data['videos'], display_id)['allVideos']
        # Pick the entry whose slug matches the URL id; raises StopIteration
        # if absent (treated as an extraction failure).
        video_data = next(video for video in videos if video.get('slug') == display_id)

        series = video_data.get('showTitle')
        title = episode = video_data.get('title') or series
        if series and series != title:
            # Prefix the series name unless the title already is the series.
            title = '%s - %s' % (series, title)

        formats = []
        for f, format_id in (('cdnUriM3U8', 'mobi'), ('webVideoUrlSd', 'sd'), ('webVideoUrlHd', 'hd')):
            f_url = video_data.get(f)
            if not f_url:
                continue
            formats.append({
                'format_id': format_id,
                'url': f_url,
            })

        return {
            'id': display_id,
            'display_id': display_id,
            'title': title,
            'description': video_data.get('description'),
            'thumbnail': video_data.get('thumbnail'),
            'duration': parse_duration(video_data.get('runTime')),
            'formats': formats,
            'episode': episode,
            'series': series,
        }
 | 
			
		||||
@@ -1,73 +0,0 @@
 | 
			
		||||
from .common import InfoExtractor
 | 
			
		||||
from ..utils import (
 | 
			
		||||
    ExtractorError,
 | 
			
		||||
    sanitized_Request,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class EveryonesMixtapeIE(InfoExtractor):
    """Extractor for everyonesmixtape.com mixes.

    A URL with a trailing song number extracts that single track; without it,
    the whole mix is returned as a playlist.
    """

    _VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'

    _TESTS = [{
        'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
        'info_dict': {
            'id': '5bfseWNmlds',
            'ext': 'mp4',
            'title': "Passion Pit - \"Sleepyhead\" (Official Music Video)",
            'uploader': 'FKR.TV',
            'uploader_id': 'frenchkissrecords',
            'description': "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
            'upload_date': '20081015'
        },
        'params': {
            'skip_download': True,  # This is simply YouTube
        }
    }, {
        'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi',
        'info_dict': {
            'id': 'm7m0jJAbMQi',
            'title': 'Driving',
        },
        'playlist_count': 24
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        playlist_id = mobj.group('id')

        # First request maps the public playlist code to the site's internal
        # numeric id; the AJAX header is required by the endpoint.
        pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id
        pllist_req = sanitized_Request(pllist_url)
        pllist_req.add_header('X-Requested-With', 'XMLHttpRequest')

        playlist_list = self._download_json(
            pllist_req, playlist_id, note='Downloading playlist metadata')
        try:
            playlist_no = next(playlist['id']
                               for playlist in playlist_list
                               if playlist['code'] == playlist_id)
        except StopIteration:
            raise ExtractorError('Playlist id not found')

        # Second request fetches the actual track list by internal id.
        pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no
        pl_req = sanitized_Request(pl_url)
        pl_req.add_header('X-Requested-With', 'XMLHttpRequest')
        playlist = self._download_json(
            pl_req, playlist_id, note='Downloading playlist info')

        # Tracks point at external URLs (typically YouTube), so each entry is
        # a plain url result for other extractors to handle.
        entries = [{
            '_type': 'url',
            'url': t['url'],
            'title': t['title'],
        } for t in playlist['tracks']]

        if mobj.group('songnr'):
            # URL song numbers are 1-based; entries are 0-based.
            songnr = int(mobj.group('songnr')) - 1
            return entries[songnr]

        playlist_title = playlist['mixData']['name']
        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': playlist_title,
            'entries': entries,
        }
 | 
			
		||||
@@ -1,74 +0,0 @@
 | 
			
		||||
from .adobepass import AdobePassIE
 | 
			
		||||
from ..utils import (
 | 
			
		||||
    extract_attributes,
 | 
			
		||||
    int_or_none,
 | 
			
		||||
    parse_age_limit,
 | 
			
		||||
    smuggle_url,
 | 
			
		||||
    update_url_query,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class FXNetworksIE(AdobePassIE):
    """Extractor for fxnetworks.com and simpsonsworld.com video pages.

    Delegates playback to ThePlatform via a url_transparent result; handles
    Adobe Pass (TV-provider) authentication when the page requires it.
    """

    _VALID_URL = r'https?://(?:www\.)?(?:fxnetworks|simpsonsworld)\.com/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.fxnetworks.com/video/1032565827847',
        'md5': '8d99b97b4aa7a202f55b6ed47ea7e703',
        'info_dict': {
            'id': 'dRzwHC_MMqIv',
            'ext': 'mp4',
            'title': 'First Look: Better Things - Season 2',
            'description': 'Because real life is like a fart. Watch this FIRST LOOK to see what inspired the new season of Better Things.',
            'age_limit': 14,
            'uploader': 'NEWA-FNG-FX',
            'upload_date': '20170825',
            'timestamp': 1503686274,
            'episode_number': 0,
            'season_number': 2,
            'series': 'Better Things',
        },
        'add_ie': ['ThePlatform'],
    }, {
        'url': 'http://www.simpsonsworld.com/video/716094019682',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        if 'The content you are trying to access is not available in your region.' in webpage:
            self.raise_geo_restricted()
        # The page carries its metadata as attributes on an anchor whose rel
        # points at a ThePlatform release URL.
        video_data = extract_attributes(self._search_regex(
            r'(<a.+?rel="https?://link\.theplatform\.com/s/.+?</a>)', webpage, 'video data'))
        player_type = self._search_regex(r'playerType\s*=\s*[\'"]([^\'"]+)', webpage, 'player type', default=None)
        release_url = video_data['rel']
        title = video_data['data-title']
        rating = video_data.get('data-rating')
        query = {
            'mbr': 'true',
        }
        # Movies use an HLS (m3u) manifest; everything else forces HTTP
        # delivery on ThePlatform.
        if player_type == 'movies':
            query.update({
                'manifest': 'm3u',
            })
        else:
            query.update({
                'switch': 'http',
            })
        if video_data.get('data-req-auth') == '1':
            # Page flags provider-auth content: obtain an Adobe Pass token
            # and pass it through to ThePlatform.
            resource = self._get_mvpd_resource(
                video_data['data-channel'], title,
                video_data.get('data-guid'), rating)
            query['auth'] = self._extract_mvpd_auth(url, video_id, 'fx', resource)

        return {
            '_type': 'url_transparent',
            'id': video_id,
            'title': title,
            'url': smuggle_url(update_url_query(release_url, query), {'force_smil_url': True}),
            'series': video_data.get('data-show-title'),
            'episode_number': int_or_none(video_data.get('data-episode')),
            'season_number': int_or_none(video_data.get('data-season')),
            'thumbnail': video_data.get('data-large-thumb'),
            'age_limit': parse_age_limit(rating),
            'ie_key': 'ThePlatform',
        }
 | 
			
		||||
@@ -1,92 +0,0 @@
 | 
			
		||||
from .common import InfoExtractor
 | 
			
		||||
from ..utils import (
 | 
			
		||||
    ExtractorError,
 | 
			
		||||
    float_or_none,
 | 
			
		||||
    srt_subtitles_timecode,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class KanalPlayIE(InfoExtractor):
    """Extractor for the Swedish Kanal 5/9/11 Play sites (RTMP streams)."""

    IE_DESC = 'Kanal 5/9/11 Play'
    _VALID_URL = r'https?://(?:www\.)?kanal(?P<channel_id>5|9|11)play\.se/(?:#!/)?(?:play/)?program/\d+/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.kanal5play.se/#!/play/program/3060212363/video/3270012277',
        'info_dict': {
            'id': '3270012277',
            'ext': 'flv',
            'title': 'Saknar både dusch och avlopp',
            'description': 'md5:6023a95832a06059832ae93bc3c7efb7',
            'duration': 2636.36,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'http://www.kanal9play.se/#!/play/program/335032/video/246042',
        'only_matching': True,
    }, {
        'url': 'http://www.kanal11play.se/#!/play/program/232835958/video/367135199',
        'only_matching': True,
    }]

    def _fix_subtitles(self, subs):
        """Convert the site's JSON cue list into SRT-formatted text.

        Each cue supplies start/end times in milliseconds; cue numbers are
        1-based per the SRT format.
        """
        return '\r\n\r\n'.join(
            '%s\r\n%s --> %s\r\n%s'
            % (
                num,
                srt_subtitles_timecode(item['startMillis'] / 1000.0),
                srt_subtitles_timecode(item['endMillis'] / 1000.0),
                item['text'],
            ) for num, item in enumerate(subs, 1))

    def _get_subtitles(self, channel_id, video_id):
        """Download Swedish subtitles, returning {} if unavailable."""
        subs = self._download_json(
            'http://www.kanal%splay.se/api/subtitles/%s' % (channel_id, video_id),
            video_id, 'Downloading subtitles JSON', fatal=False)
        return {'sv': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]} if subs else {}

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        channel_id = mobj.group('channel_id')

        video = self._download_json(
            'http://www.kanal%splay.se/api/getVideo?format=FLASH&videoId=%s' % (channel_id, video_id),
            video_id)

        # The API explains unplayable videos via a list of reason strings.
        reasons_for_no_streams = video.get('reasonsForNoStreams')
        if reasons_for_no_streams:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, '\n'.join(reasons_for_no_streams)),
                expected=True)

        title = video['title']
        description = video.get('description')
        # 'length' is in milliseconds.
        duration = float_or_none(video.get('length'), 1000)
        thumbnail = video.get('posterUrl')

        stream_base_url = video['streamBaseUrl']

        # All streams share one RTMP base URL and differ only by play path
        # and bitrate (reported in bit/s, scaled to kbit/s).
        formats = [{
            'url': stream_base_url,
            'play_path': stream['source'],
            'ext': 'flv',
            'tbr': float_or_none(stream.get('bitrate'), 1000),
            'rtmp_real_time': True,
        } for stream in video['streams']]
        self._sort_formats(formats)

        subtitles = {}
        if video.get('hasSubtitle'):
            subtitles = self.extract_subtitles(channel_id, video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
 | 
			
		||||
@@ -1,225 +0,0 @@
 | 
			
		||||
import re
 | 
			
		||||
import time
 | 
			
		||||
import hashlib
 | 
			
		||||
 | 
			
		||||
from .common import InfoExtractor
 | 
			
		||||
from ..compat import (
 | 
			
		||||
    compat_str,
 | 
			
		||||
)
 | 
			
		||||
from ..utils import (
 | 
			
		||||
    clean_html,
 | 
			
		||||
    ExtractorError,
 | 
			
		||||
    int_or_none,
 | 
			
		||||
    float_or_none,
 | 
			
		||||
    parse_iso8601,
 | 
			
		||||
    parse_qs,
 | 
			
		||||
    sanitized_Request,
 | 
			
		||||
    urlencode_postdata,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class NocoIE(InfoExtractor):
    """Extractor for noco.tv emissions (account required).

    API requests are signed with a millisecond timestamp and an MD5 token;
    `_real_extract` first measures the server/local clock offset
    (`self._ts_offset`) and records the player URL (`self._referer`) that
    every subsequent `_call_api` request needs.
    """

    _VALID_URL = r'https?://(?:(?:www\.)?noco\.tv/emission/|player\.noco\.tv/\?idvideo=)(?P<id>\d+)'
    _LOGIN_URL = 'https://noco.tv/do.php'
    _API_URL_TEMPLATE = 'https://api.noco.tv/1.1/%s?ts=%s&tk=%s'
    _SUB_LANG_TEMPLATE = '&sub_lang=%s'
    _NETRC_MACHINE = 'noco'

    _TESTS = [
        {
            'url': 'http://noco.tv/emission/11538/nolife/ami-ami-idol-hello-france/',
            'md5': '0a993f0058ddbcd902630b2047ef710e',
            'info_dict': {
                'id': '11538',
                'ext': 'mp4',
                'title': 'Ami Ami Idol - Hello! France',
                'description': 'md5:4eaab46ab68fa4197a317a88a53d3b86',
                'upload_date': '20140412',
                'uploader': 'Nolife',
                'uploader_id': 'NOL',
                'duration': 2851.2,
            },
            'skip': 'Requires noco account',
        },
        {
            'url': 'http://noco.tv/emission/12610/lbl42/the-guild/s01e01-wake-up-call',
            'md5': 'c190f1f48e313c55838f1f412225934d',
            'info_dict': {
                'id': '12610',
                'ext': 'mp4',
                'title': 'The Guild #1 - Wake-Up Call',
                'timestamp': 1403863200,
                'upload_date': '20140627',
                'uploader': 'LBL42',
                'uploader_id': 'LBL',
                'duration': 233.023,
            },
            'skip': 'Requires noco account',
        }
    ]

    def _perform_login(self, username, password):
        """Log in via the site's form endpoint; raise on an API-reported error."""
        login = self._download_json(
            self._LOGIN_URL, None, 'Logging in',
            data=urlencode_postdata({
                'a': 'login',
                'cookie': '1',
                'username': username,
                'password': password,
            }),
            headers={
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            })

        # The endpoint reports failures under the French key 'erreur'.
        if 'erreur' in login:
            raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True)

    @staticmethod
    def _ts():
        # Current local time in milliseconds, as the API expects.
        return int(time.time() * 1000)

    def _call_api(self, path, video_id, note, sub_lang=None):
        """Perform a signed API request; relies on state set in _real_extract.

        The token is md5(md5(ts) + secret); `self._ts_offset` and
        `self._referer` must already be set.
        """
        ts = compat_str(self._ts() + self._ts_offset)
        tk = hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')).hexdigest()
        url = self._API_URL_TEMPLATE % (path, ts, tk)
        if sub_lang:
            url += self._SUB_LANG_TEMPLATE % sub_lang

        request = sanitized_Request(url)
        request.add_header('Referer', self._referer)

        resp = self._download_json(request, video_id, note)

        if isinstance(resp, dict) and resp.get('error'):
            self._raise_error(resp['error'], resp['description'])

        return resp

    def _raise_error(self, error, description):
        """Raise an expected ExtractorError from an API error payload."""
        raise ExtractorError(
            '%s returned error: %s - %s' % (self.IE_NAME, error, description),
            expected=True)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Timestamp adjustment offset between server time and local time
        # must be calculated in order to use timestamps closest to server's
        # in all API requests (see https://github.com/ytdl-org/youtube-dl/issues/7864)
        webpage = self._download_webpage(url, video_id)

        player_url = self._search_regex(
            r'(["\'])(?P<player>https?://noco\.tv/(?:[^/]+/)+NocoPlayer.+?\.swf.*?)\1',
            webpage, 'noco player', group='player',
            default='http://noco.tv/cdata/js/player/NocoPlayer-v1.2.40.swf')

        # The player URL's 'ts' query parameter carries the server timestamp.
        qs = parse_qs(player_url)
        ts = int_or_none(qs.get('ts', [None])[0])
        self._ts_offset = ts - self._ts() if ts else 0
        self._referer = player_url

        medias = self._call_api(
            'shows/%s/medias' % video_id,
            video_id, 'Downloading video JSON')

        show = self._call_api(
            'shows/by_id/%s' % video_id,
            video_id, 'Downloading show JSON')[0]

        options = self._call_api(
            'users/init', video_id,
            'Downloading user options JSON')['options']
        # Audio language preference comes from the account settings,
        # defaulting to French.
        audio_lang_pref = options.get('audio_language') or options.get('language', 'fr')

        if audio_lang_pref == 'original':
            audio_lang_pref = show['original_lang']
        if len(medias) == 1:
            # Only one audio track available: preference is moot.
            audio_lang_pref = list(medias.keys())[0]
        elif audio_lang_pref not in medias:
            audio_lang_pref = 'fr'

        qualities = self._call_api(
            'qualities',
            video_id, 'Downloading qualities JSON')

        formats = []

        # One format per (audio language, subtitle language, quality) triple;
        # each requires its own signed API call to resolve the file URL.
        for audio_lang, audio_lang_dict in medias.items():
            preference = 1 if audio_lang == audio_lang_pref else 0
            for sub_lang, lang_dict in audio_lang_dict['video_list'].items():
                for format_id, fmt in lang_dict['quality_list'].items():
                    format_id_extended = 'audio-%s_sub-%s_%s' % (audio_lang, sub_lang, format_id)

                    video = self._call_api(
                        'shows/%s/video/%s/%s' % (video_id, format_id.lower(), audio_lang),
                        video_id, 'Downloading %s video JSON' % format_id_extended,
                        sub_lang if sub_lang != 'none' else None)

                    file_url = video['file']
                    if not file_url:
                        continue

                    # The API signals access errors through sentinel file
                    # values plus a user-facing popup message.
                    if file_url in ['forbidden', 'not found']:
                        popmessage = video['popmessage']
                        self._raise_error(popmessage['title'], popmessage['message'])

                    formats.append({
                        'url': file_url,
                        'format_id': format_id_extended,
                        'width': int_or_none(fmt.get('res_width')),
                        'height': int_or_none(fmt.get('res_lines')),
                        'abr': int_or_none(fmt.get('audiobitrate'), 1000),
                        'vbr': int_or_none(fmt.get('videobitrate'), 1000),
                        'filesize': int_or_none(fmt.get('filesize')),
                        'format_note': qualities[format_id].get('quality_name'),
                        'quality': qualities[format_id].get('priority'),
                        'language_preference': preference,
                    })

        self._sort_formats(formats)

        # The date string uses a space separator instead of ISO 8601's 'T'.
        timestamp = parse_iso8601(show.get('online_date_start_utc'), ' ')

        if timestamp is not None and timestamp < 0:
            # Negative timestamps are placeholder values; discard them.
            timestamp = None

        uploader = show.get('partner_name')
        uploader_id = show.get('partner_key')
        duration = float_or_none(show.get('duration_ms'), 1000)

        # Thumbnails are scattered over the show dict as
        # 'screenshot_<width>x<height>' keys.
        thumbnails = []
        for thumbnail_key, thumbnail_url in show.items():
            m = re.search(r'^screenshot_(?P<width>\d+)x(?P<height>\d+)$', thumbnail_key)
            if not m:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int(m.group('width')),
                'height': int(m.group('height')),
            })

        # '_TT' keys are translated titles, '_OT' the original ones.
        episode = show.get('show_TT') or show.get('show_OT')
        family = show.get('family_TT') or show.get('family_OT')
        episode_number = show.get('episode_number')

        # Compose "<family> #<number> - <episode>" from whichever parts exist.
        title = ''
        if family:
            title += family
        if episode_number:
            title += ' #' + compat_str(episode_number)
        if episode:
            title += ' - ' + compat_str(episode)

        description = show.get('show_resume') or show.get('family_resume')

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'timestamp': timestamp,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'duration': duration,
            'formats': formats,
        }
 | 
			
		||||
@@ -1,15 +0,0 @@
 | 
			
		||||
from .common import InfoExtractor
 | 
			
		||||
from .nexx import NexxIE
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class SpiegeltvIE(InfoExtractor):
    """Thin wrapper that hands spiegel.tv video pages over to the Nexx extractor."""
    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/videos/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.spiegel.tv/videos/161681-flug-mh370/',
        'only_matching': True,
    }

    def _real_extract(self, url):
        # All playback goes through the Nexx cloud API; delegate by video id.
        video_id = self._match_id(url)
        nexx_url = 'https://api.nexx.cloud/v3/748/videos/byid/%s' % video_id
        return self.url_result(nexx_url, ie=NexxIE.ie_key())
 | 
			
		||||
@@ -1,41 +0,0 @@
 | 
			
		||||
from .common import InfoExtractor
 | 
			
		||||
from .ooyala import OoyalaIE
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class TastyTradeIE(InfoExtractor):
    """Extracts tastytrade.com episodes by delegating playback to Ooyala."""
    _VALID_URL = r'https?://(?:www\.)?tastytrade\.com/tt/shows/[^/]+/episodes/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://www.tastytrade.com/tt/shows/market-measures/episodes/correlation-in-short-volatility-06-28-2017',
        'info_dict': {
            'id': 'F3bnlzbToeI6pLEfRyrlfooIILUjz4nM',
            'ext': 'mp4',
            'title': 'A History of Teaming',
            'description': 'md5:2a9033db8da81f2edffa4c99888140b3',
            'duration': 422.255,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['Ooyala'],
    }, {
        'url': 'https://www.tastytrade.com/tt/shows/daily-dose/episodes/daily-dose-06-30-2017',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The Ooyala embed code lives in a data-media-id attribute.
        ooyala_code = self._search_regex(
            r'data-media-id=(["\'])(?P<code>(?:(?!\1).)+)\1',
            webpage, 'ooyala code', group='code')

        # Start from whatever JSON-LD metadata the page exposes, then
        # overlay the transparent hand-off to the Ooyala extractor.
        info = self._search_json_ld(webpage, display_id, fatal=False)
        info['_type'] = 'url_transparent'
        info['ie_key'] = OoyalaIE.ie_key()
        info['url'] = 'ooyala:%s' % ooyala_code
        info['display_id'] = display_id
        return info
 | 
			
		||||
@@ -1,45 +0,0 @@
 | 
			
		||||
from .common import InfoExtractor
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class TudouPlaylistIE(InfoExtractor):
    """Expands a tudou.com listplay page into its individual Tudou videos."""
    IE_NAME = 'tudou:playlist'
    _VALID_URL = r'https?://(?:www\.)?tudou\.com/listplay/(?P<id>[\w-]{11})\.html'
    _TESTS = [{
        'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo.html',
        'info_dict': {
            'id': 'zzdE77v6Mmo',
        },
        'playlist_mincount': 209,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        # The plist.action endpoint returns the playlist items as JSON.
        playlist_data = self._download_json(
            'http://www.tudou.com/tvp/plist.action?lcode=%s' % playlist_id, playlist_id)

        entries = []
        for item in playlist_data['items']:
            # Each item carries its own video code (icode) and title (kw).
            entries.append(self.url_result(
                'http://www.tudou.com/programs/view/%s' % item['icode'],
                'Tudou', item['icode'], item['kw']))
        return self.playlist_result(entries, playlist_id)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class TudouAlbumIE(InfoExtractor):
    """Expands a tudou.com album page into its individual Tudou videos."""
    IE_NAME = 'tudou:album'
    _VALID_URL = r'https?://(?:www\.)?tudou\.com/album(?:cover|play)/(?P<id>[\w-]{11})'
    _TESTS = [{
        'url': 'http://www.tudou.com/albumplay/v5qckFJvNJg.html',
        'info_dict': {
            'id': 'v5qckFJvNJg',
        },
        'playlist_mincount': 45,
    }]

    def _real_extract(self, url):
        album_id = self._match_id(url)
        # The alist.action endpoint returns the album items as JSON.
        album_data = self._download_json(
            'http://www.tudou.com/tvp/alist.action?acode=%s' % album_id, album_id)

        entries = []
        for entry in album_data['items']:
            # Each entry carries its own video code (icode) and title (kw).
            entries.append(self.url_result(
                'http://www.tudou.com/programs/view/%s' % entry['icode'],
                'Tudou', entry['icode'], entry['kw']))
        return self.playlist_result(entries, album_id)
 | 
			
		||||
@@ -1,65 +0,0 @@
 | 
			
		||||
import re
 | 
			
		||||
 | 
			
		||||
from .common import InfoExtractor
 | 
			
		||||
from ..utils import (
 | 
			
		||||
    decode_packed_codes,
 | 
			
		||||
    js_to_json,
 | 
			
		||||
    NO_DEFAULT,
 | 
			
		||||
    PACKED_CODES_RE,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class VidziIE(InfoExtractor):
    """Extracts videos from vidzi.tv (and mirror domains) via jwplayer setup data."""
    _VALID_URL = r'https?://(?:www\.)?vidzi\.(?:tv|cc|si|nu)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
    _TESTS = [{
        'url': 'http://vidzi.tv/cghql9yq6emu.html',
        'md5': '4f16c71ca0c8c8635ab6932b5f3f1660',
        'info_dict': {
            'id': 'cghql9yq6emu',
            'ext': 'mp4',
            'title': 'youtube-dl test video  1\\\\2\'3/4<5\\\\6ä7↭',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://vidzi.tv/embed-4z2yb0rzphe9-600x338.html',
        'only_matching': True,
    }, {
        'url': 'http://vidzi.cc/cghql9yq6emu.html',
        'only_matching': True,
    }, {
        'url': 'https://vidzi.si/rph9gztxj1et.html',
        'only_matching': True,
    }, {
        'url': 'http://vidzi.nu/cghql9yq6emu.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'http://vidzi.tv/%s' % video_id, video_id)
        title = self._html_search_regex(
            r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title')

        # Candidate sources for the jwplayer setup call: the raw page first,
        # then every packed (p,a,c,k,e,d) script on it, decoded.
        candidates = [webpage]
        for mobj in re.finditer(PACKED_CODES_RE, webpage):
            candidates.append(
                decode_packed_codes(mobj.group(0)).replace('\\\'', '\''))

        def strip_window_refs(s):
            # Drop "+ window[...]" concatenations so the blob parses as JSON.
            return js_to_json(re.sub(r'\s*\+\s*window\[.+?\]', '', s))

        # Try each candidate; only the last one is allowed to fail hard
        # (default=NO_DEFAULT), earlier ones quietly fall through via '{}'.
        last = len(candidates)
        jwplayer_data = None
        for num, code in enumerate(candidates, 1):
            jwplayer_data = self._parse_json(
                self._search_regex(
                    r'setup\(([^)]+)\)', code, 'jwplayer data',
                    default=NO_DEFAULT if num == last else '{}'),
                video_id, transform_source=strip_window_refs)
            if jwplayer_data:
                break

        info_dict = self._parse_jwplayer_data(jwplayer_data, video_id, require_title=False)
        # The jwplayer blob rarely has a usable title; use the page heading.
        info_dict['title'] = title

        return info_dict
 | 
			
		||||
		Reference in New Issue
	
	Block a user