Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-09-23 15:25:48 +00:00)
[ie/lrt] Fix extractors (#14193)
Some checks failed
CodeQL / Analyze (python) (push) Has been cancelled
Download Tests / Quick Download Tests (push) Has been cancelled
Download Tests / Full Download Tests (ubuntu-latest, 3.10) (push) Has been cancelled
Download Tests / Full Download Tests (ubuntu-latest, 3.11) (push) Has been cancelled
Download Tests / Full Download Tests (ubuntu-latest, 3.12) (push) Has been cancelled
Download Tests / Full Download Tests (ubuntu-latest, 3.13) (push) Has been cancelled
Download Tests / Full Download Tests (ubuntu-latest, pypy-3.11) (push) Has been cancelled
Download Tests / Full Download Tests (windows-latest, 3.9) (push) Has been cancelled
Download Tests / Full Download Tests (windows-latest, pypy-3.11) (push) Has been cancelled
Quick Test / Core Test (push) Has been cancelled
Quick Test / Code check (push) Has been cancelled
Release (master) / release (push) Has been cancelled
Release (master) / publish_pypi (push) Has been cancelled
Core Tests / Core Tests (ubuntu-latest, 3.10) (push) Has been cancelled
Core Tests / Core Tests (ubuntu-latest, 3.11) (push) Has been cancelled
Core Tests / Core Tests (ubuntu-latest, 3.12) (push) Has been cancelled
Core Tests / Core Tests (ubuntu-latest, 3.13) (push) Has been cancelled
Core Tests / Core Tests (ubuntu-latest, pypy-3.11) (push) Has been cancelled
Core Tests / Core Tests (windows-latest, 3.10) (push) Has been cancelled
Core Tests / Core Tests (windows-latest, 3.12) (push) Has been cancelled
Core Tests / Core Tests (windows-latest, 3.13) (push) Has been cancelled
Core Tests / Core Tests (windows-latest, 3.9) (push) Has been cancelled
Core Tests / Core Tests (windows-latest, pypy-3.11) (push) Has been cancelled
Release (nightly) / check_nightly (push) Has been cancelled
Release (nightly) / release (push) Has been cancelled
Release (nightly) / publish_pypi (push) Has been cancelled
Closes #13501

Authored by: seproDev
@@ -1,22 +1,14 @@
 from .common import InfoExtractor
 from ..utils import (
     clean_html,
-    merge_dicts,
-    traverse_obj,
     unified_timestamp,
     url_or_none,
     urljoin,
 )
+from ..utils.traversal import traverse_obj
 
 
-class LRTBaseIE(InfoExtractor):
-    def _extract_js_var(self, webpage, var_name, default=None):
-        return self._search_regex(
-            fr'{var_name}\s*=\s*(["\'])((?:(?!\1).)+)\1',
-            webpage, var_name.replace('_', ' '), default, group=2)
-
-
-class LRTStreamIE(LRTBaseIE):
+class LRTStreamIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/tiesiogiai/(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://www.lrt.lt/mediateka/tiesiogiai/lrt-opus',
@@ -31,86 +23,110 @@ class LRTStreamIE(LRTBaseIE):
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        streams_data = self._download_json(self._extract_js_var(webpage, 'tokenURL'), video_id)
+
+        # TODO: Use _search_nextjs_v13_data once fixed
+        get_stream_url = self._search_regex(
+            r'\\"get_streams_url\\":\\"([^"]+)\\"', webpage, 'stream URL')
+        streams_data = self._download_json(get_stream_url, video_id)
 
         formats, subtitles = [], {}
         for stream_url in traverse_obj(streams_data, (
-                'response', 'data', lambda k, _: k.startswith('content')), expected_type=url_or_none):
-            fmts, subs = self._extract_m3u8_formats_and_subtitles(stream_url, video_id, 'mp4', m3u8_id='hls', live=True)
+                'response', 'data', lambda k, _: k.startswith('content'), {url_or_none})):
+            fmts, subs = self._extract_m3u8_formats_and_subtitles(
+                stream_url, video_id, 'mp4', m3u8_id='hls', live=True)
             formats.extend(fmts)
             subtitles = self._merge_subtitles(subtitles, subs)
 
-        stream_title = self._extract_js_var(webpage, 'video_title', 'LRT')
         return {
             'id': video_id,
             'formats': formats,
             'subtitles': subtitles,
             'is_live': True,
-            'title': f'{self._og_search_title(webpage)} - {stream_title}',
+            'title': self._og_search_title(webpage),
         }
 
 
-class LRTVODIE(LRTBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?lrt\.lt(?P<path>/mediateka/irasas/(?P<id>[0-9]+))'
+class LRTVODIE(InfoExtractor):
+    _VALID_URL = [
+        r'https?://(?:(?:www|archyvai)\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)',
+        r'https?://(?:(?:www|archyvai)\.)?lrt\.lt/mediateka/video/[^?#]+\?(?:[^#]*&)?episode=(?P<id>[0-9]+)',
+    ]
     _TESTS = [{
         # m3u8 download
         'url': 'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene',
         'info_dict': {
             'id': '2000127261',
             'ext': 'mp4',
-            'title': 'Greita ir gardu: Sicilijos įkvėpta klasikinių makaronų su baklažanais vakarienė',
+            'title': 'Nustebinkite svečius klasikiniu makaronų su baklažanais receptu',
             'description': 'md5:ad7d985f51b0dc1489ba2d76d7ed47fa',
             'duration': 3035,
-            'timestamp': 1604079000,
+            'timestamp': 1604086200,
             'upload_date': '20201030',
             'tags': ['LRT TELEVIZIJA', 'Beatos virtuvė', 'Beata Nicholson', 'Makaronai', 'Baklažanai', 'Vakarienė', 'Receptas'],
             'thumbnail': 'https://www.lrt.lt/img/2020/10/30/764041-126478-1287x836.jpg',
+            'channel': 'Beatos virtuvė',
         },
     }, {
-        # direct mp3 download
-        'url': 'http://www.lrt.lt/mediateka/irasas/1013074524/',
-        'md5': '389da8ca3cad0f51d12bed0c844f6a0a',
+        # audio download
+        'url': 'https://www.lrt.lt/mediateka/irasas/1013074524/kita-tema',
+        'md5': 'fc982f10274929c66fdff65f75615cb0',
         'info_dict': {
             'id': '1013074524',
-            'ext': 'mp3',
-            'title': 'Kita tema 2016-09-05 15:05',
+            'ext': 'mp4',
+            'title': 'Kita tema',
             'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5',
             'duration': 3008,
             'view_count': int,
             'like_count': int,
+            'channel': 'Kita tema',
+            'timestamp': 1473087900,
+            'upload_date': '20160905',
         },
+    }, {
+        'url': 'https://www.lrt.lt/mediateka/video/auksinis-protas-vasara?episode=2000420320&season=%2Fmediateka%2Fvideo%2Fauksinis-protas-vasara%2F2025',
+        'info_dict': {
+            'id': '2000420320',
+            'ext': 'mp4',
+            'title': 'Kuris senovės romėnų poetas aprašė Narcizo mitą?',
+            'description': 'Intelektinė viktorina. Ved. Arūnas Valinskas ir Andrius Tapinas.',
+            'channel': 'Auksinis protas. Vasara',
+            'thumbnail': 'https://www.lrt.lt/img/2025/06/09/2094343-987905-1287x836.jpg',
+            'tags': ['LRT TELEVIZIJA', 'Auksinis protas'],
+            'timestamp': 1749851040,
+            'upload_date': '20250613',
+        },
+    }, {
+        'url': 'https://archyvai.lrt.lt/mediateka/video/ziniu-riteriai-ir-damos?episode=49685&season=%2Fmediateka%2Fvideo%2Fziniu-riteriai-ir-damos%2F2013',
+        'only_matching': True,
+    }, {
+        'url': 'https://archyvai.lrt.lt/mediateka/irasas/2000077058/panorama-1989-baltijos-kelias',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        path, video_id = self._match_valid_url(url).group('path', 'id')
+        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
 
-        media_url = self._extract_js_var(webpage, 'main_url', path)
-        media = self._download_json(self._extract_js_var(
-            webpage, 'media_info_url',
-            'https://www.lrt.lt/servisai/stream_url/vod/media_info/'),
-            video_id, query={'url': media_url})
+        # TODO: Use _search_nextjs_v13_data once fixed
+        canonical_url = (
+            self._search_regex(r'\\"(?:article|data)\\":{[^}]*\\"url\\":\\"(/[^"]+)\\"', webpage, 'content URL', fatal=False)
+            or self._search_regex(r'<link\s+rel="canonical"\s*href="(/[^"]+)"', webpage, 'canonical URL'))
+
+        media = self._download_json(
+            'https://www.lrt.lt/servisai/stream_url/vod/media_info/',
+            video_id, query={'url': canonical_url})
         jw_data = self._parse_jwplayer_data(
             media['playlist_item'], video_id, base_url=url)
 
-        json_ld_data = self._search_json_ld(webpage, video_id)
-
-        tags = []
-        for tag in (media.get('tags') or []):
-            tag_name = tag.get('name')
-            if not tag_name:
-                continue
-            tags.append(tag_name)
-
-        clean_info = {
-            'description': clean_html(media.get('content')),
-            'tags': tags,
+        return {
+            **jw_data,
+            **traverse_obj(media, {
+                'id': ('id', {str}),
+                'title': ('title', {str}),
+                'description': ('content', {clean_html}),
+                'timestamp': ('date', {lambda x: x.replace('.', '/')}, {unified_timestamp}),
+                'tags': ('tags', ..., 'name', {str}),
+            }),
         }
-
-        return merge_dicts(clean_info, jw_data, json_ld_data)
 
 
-class LRTRadioIE(LRTBaseIE):
+class LRTRadioIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?lrt\.lt/radioteka/irasas/(?P<id>\d+)/(?P<path>[^?#/]+)'
     _TESTS = [{
         # m3u8 download
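Note on the LRTStreamIE change above: the stream token URL is no longer read from a `tokenURL` JS variable but from the escaped JSON that the page embeds in its Next.js payload. Below is a minimal sketch of how that escaped-quote regex behaves, using plain `re` against an invented payload snippet; the `self.__next_f.push` wrapper and the URL value are made up for illustration, while the real extractor runs the same pattern through `_search_regex`.

import re

# Invented snippet resembling the escaped JSON inside the page's Next.js payload
sample_webpage = r'self.__next_f.push([1,"{\"get_streams_url\":\"https://example.invalid/streams.json\"}"])'

# Same pattern as in the diff: it matches \"get_streams_url\":\"...\" with the
# quotes still backslash-escaped and captures the URL in group 1
match = re.search(r'\\"get_streams_url\\":\\"([^"]+)\\"', sample_webpage)
print(match.group(1))  # https://example.invalid/streams.json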
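LRTVODIE now accepts a list of two URL patterns, covering both the classic /mediateka/irasas/<id>/... links and the newer /mediateka/video/...?episode=<id> links, including the archyvai. subdomain. A quick sketch of how those two regexes capture the video ID with plain `re`; the URLs come from the tests in the diff, and the matching loop itself is only illustrative rather than yt-dlp's own machinery.

import re

patterns = [
    r'https?://(?:(?:www|archyvai)\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)',
    r'https?://(?:(?:www|archyvai)\.)?lrt\.lt/mediateka/video/[^?#]+\?(?:[^#]*&)?episode=(?P<id>[0-9]+)',
]
urls = [
    'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene',
    'https://www.lrt.lt/mediateka/video/auksinis-protas-vasara?episode=2000420320&season=%2Fmediateka%2Fvideo%2Fauksinis-protas-vasara%2F2025',
    'https://archyvai.lrt.lt/mediateka/irasas/2000077058/panorama-1989-baltijos-kelias',
]
for url in urls:
    # First pattern that matches wins in this sketch
    video_id = next(m.group('id') for p in patterns if (m := re.match(p, url)))
    print(video_id)  # 2000127261, 2000420320, 2000077058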
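The VOD metadata is now mapped straight from the media_info response with `traverse_obj` instead of merging JSON-LD data with a hand-built dict. A minimal sketch of that mapping, assuming a yt-dlp installation is importable; the `media` payload below is invented, and only the traversal spec is taken from the diff.

from yt_dlp.utils import clean_html, unified_timestamp
from yt_dlp.utils.traversal import traverse_obj

# Invented stand-in for the media_info API response
media = {
    'id': '2000127261',
    'title': 'Nustebinkite svečius klasikiniu makaronų su baklažanais receptu',
    'content': '<p>Sicilijos įkvėpta vakarienė.</p>',
    'date': '2020.10.30 19:30',
    'tags': [{'name': 'LRT TELEVIZIJA'}, {'name': 'Beatos virtuvė'}],
}

info = traverse_obj(media, {
    'id': ('id', {str}),  # {str} keeps the value only if it already is a str
    'title': ('title', {str}),
    'description': ('content', {clean_html}),  # strips the HTML tags
    # '2020.10.30 19:30' -> '2020/10/30 19:30' -> Unix timestamp
    'timestamp': ('date', {lambda x: x.replace('.', '/')}, {unified_timestamp}),
    'tags': ('tags', ..., 'name', {str}),  # collects every tag name
})
print(info)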