Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-09-23 15:25:48 +00:00)
@@ -8,7 +8,7 @@ def main():
     return  # This is unused in yt-dlp

     parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
-    options, args = parser.parse_args()
+    _, args = parser.parse_args()
     if len(args) != 2:
         parser.error('Expected an input and an output filename')

@@ -75,7 +75,7 @@ dev = [
 ]
 static-analysis = [
     "autopep8~=2.0",
-    "ruff~=0.12.0",
+    "ruff~=0.13.0",
 ]
 test = [
     "pytest~=8.1",

@@ -1945,7 +1945,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
         server_thread.daemon = True
         server_thread.start()

-        (content, urlh) = self.ie._download_webpage_handle(
+        content, _ = self.ie._download_webpage_handle(
             f'http://127.0.0.1:{port}/teapot', None,
             expected_status=TEAPOT_RESPONSE_STATUS)
         self.assertEqual(content, TEAPOT_RESPONSE_BODY)

@@ -29,7 +29,7 @@ class TestOverwrites(unittest.TestCase):
             '-o', 'test.webm',
             'https://www.youtube.com/watch?v=jNQXAC9IVRw',
         ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        sout, _ = outp.communicate()
         self.assertTrue(b'has already been downloaded' in sout)
         # if the file has no content, it has not been redownloaded
         self.assertTrue(os.path.getsize(download_file) < 1)

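Note on the pattern above and in the hunks that follow: Popen.communicate() always returns a (stdout, stderr) pair, so binding the unused half to _ is the idiomatic way to mark it as intentionally discarded. A minimal standalone sketch (not part of the yt-dlp test suite; 'echo' assumes a POSIX environment):

    import subprocess

    proc = subprocess.Popen(
        ['echo', 'hello'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    sout, _ = proc.communicate()  # stderr is intentionally discarded
    assert b'hello' in sout
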
@@ -41,7 +41,7 @@ class TestOverwrites(unittest.TestCase):
             '-o', 'test.webm',
             'https://www.youtube.com/watch?v=jNQXAC9IVRw',
         ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        sout, _ = outp.communicate()
         self.assertTrue(b'has already been downloaded' not in sout)
         # if the file has no content, it has not been redownloaded
         self.assertTrue(os.path.getsize(download_file) > 1)

@@ -153,7 +153,7 @@ class TestPoTokenProvider:

         with pytest.raises(
             PoTokenProviderRejectedRequest,
-            match='External requests by "example" provider do not support proxy scheme "socks4". Supported proxy '
+            match=r'External requests by "example" provider do not support proxy scheme "socks4"\. Supported proxy '
                   'schemes: http, socks5h',
         ):
             provider.request_pot(pot_request)

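Why the raw string matters here: pytest applies match= as a regex via re.search(), so an unescaped '.' silently matches any character; r'\.' pins the literal dot. A minimal illustration (hypothetical test, not from the suite):

    import pytest

    def test_match_is_a_regex():
        # match= is searched against str(exception) as a regex,
        # so the sentence-ending dot must be escaped
        with pytest.raises(ValueError, match=r'bad value\.'):
            raise ValueError('bad value.')
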
@@ -22,7 +22,7 @@ class TestVerboseOutput(unittest.TestCase):
             '--username', 'johnsmith@gmail.com',
             '--password', 'my_secret_password',
         ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        _, serr = outp.communicate()
         self.assertTrue(b'--username' in serr)
         self.assertTrue(b'johnsmith' not in serr)
         self.assertTrue(b'--password' in serr)

@@ -36,7 +36,7 @@ class TestVerboseOutput(unittest.TestCase):
             '-u', 'johnsmith@gmail.com',
             '-p', 'my_secret_password',
         ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        _, serr = outp.communicate()
         self.assertTrue(b'-u' in serr)
         self.assertTrue(b'johnsmith' not in serr)
         self.assertTrue(b'-p' in serr)

@@ -50,7 +50,7 @@ class TestVerboseOutput(unittest.TestCase):
             '--username=johnsmith@gmail.com',
             '--password=my_secret_password',
         ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        _, serr = outp.communicate()
         self.assertTrue(b'--username' in serr)
         self.assertTrue(b'johnsmith' not in serr)
         self.assertTrue(b'--password' in serr)

@@ -64,7 +64,7 @@ class TestVerboseOutput(unittest.TestCase):
             '-u=johnsmith@gmail.com',
             '-p=my_secret_password',
         ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        _, serr = outp.communicate()
         self.assertTrue(b'-u' in serr)
         self.assertTrue(b'johnsmith' not in serr)
         self.assertTrue(b'-p' in serr)

@@ -149,14 +149,14 @@ class FlvReader(io.BytesIO):
         segments_count = self.read_unsigned_char()
         segments = []
         for _ in range(segments_count):
-            box_size, box_type, box_data = self.read_box_info()
+            _box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'asrt'
             segment = FlvReader(box_data).read_asrt()
             segments.append(segment)
         fragments_run_count = self.read_unsigned_char()
         fragments = []
         for _ in range(fragments_run_count):
-            box_size, box_type, box_data = self.read_box_info()
+            _box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'afrt'
             fragments.append(FlvReader(box_data).read_afrt())

@@ -167,7 +167,7 @@ class FlvReader(io.BytesIO):
         }

     def read_bootstrap_info(self):
-        total_size, box_type, box_data = self.read_box_info()
+        _, box_type, box_data = self.read_box_info()
         assert box_type == b'abst'
         return FlvReader(box_data).read_abst()

@@ -324,9 +324,9 @@ class F4mFD(FragmentFD):
         if requested_bitrate is None or len(formats) == 1:
             # get the best format
             formats = sorted(formats, key=lambda f: f[0])
-            rate, media = formats[-1]
+            _, media = formats[-1]
         else:
-            rate, media = next(filter(
+            _, media = next(filter(
                 lambda f: int(f[0]) == requested_bitrate, formats))

         # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.

@@ -1366,7 +1366,7 @@ class BilibiliSpaceVideoIE(BilibiliSpaceBaseIE):
                 else:
                     yield self.url_result(f'https://www.bilibili.com/video/{entry["bvid"]}', BiliBiliIE, entry['bvid'])

-        metadata, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
+        _, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
         return self.playlist_result(paged_list, playlist_id)


@@ -1400,7 +1400,7 @@ class BilibiliSpaceAudioIE(BilibiliSpaceBaseIE):
             for entry in page_data.get('data') or []:
                 yield self.url_result(f'https://www.bilibili.com/audio/au{entry["id"]}', BilibiliAudioIE, entry['id'])

-        metadata, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
+        _, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
         return self.playlist_result(paged_list, playlist_id)


@@ -174,7 +174,7 @@ class BrainPOPLegacyBaseIE(BrainPOPBaseIE):
         }

     def _real_extract(self, url):
-        slug, display_id = self._match_valid_url(url).group('slug', 'id')
+        display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
         topic_data = self._search_json(
             r'var\s+content\s*=\s*', webpage, 'content data',

@@ -272,6 +272,7 @@ class CNNIndonesiaIE(InfoExtractor):
         return merge_dicts(json_ld_data, {
             '_type': 'url_transparent',
             'url': embed_url,
             'id': video_id,
+            'upload_date': upload_date,
             'tags': try_call(lambda: self._html_search_meta('keywords', webpage).split(', ')),
         })

@@ -7,7 +7,7 @@ from ..utils import (


 class FifaIE(InfoExtractor):
-    _VALID_URL = r'https?://www\.fifa\.com/fifaplus/(?P<locale>\w{2})/watch/([^#?]+/)?(?P<id>\w+)'
+    _VALID_URL = r'https?://www\.fifa\.com/fifaplus/\w{2}/watch/([^#?]+/)?(?P<id>\w+)'
     _TESTS = [{
         'url': 'https://www.fifa.com/fifaplus/en/watch/7on10qPcnyLajDDU3ntg6y',
         'info_dict': {

@@ -51,7 +51,7 @@ class FifaIE(InfoExtractor):
     }]

     def _real_extract(self, url):
-        video_id, locale = self._match_valid_url(url).group('id', 'locale')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)

         preconnect_link = self._search_regex(

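Once only the id group is read, _match_id is the usual shorthand; it is roughly equivalent to this sketch (simplified from yt_dlp/extractor/common.py):

    @classmethod
    def _match_id(cls, url):
        # extract the 'id' named group from _VALID_URL
        return cls._match_valid_url(url).group('id')
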
@@ -129,7 +129,7 @@ class NownessSeriesIE(NownessBaseIE):
     }

     def _real_extract(self, url):
-        display_id, series = self._api_request(url, 'series/getBySlug/%s')
+        _, series = self._api_request(url, 'series/getBySlug/%s')
         entries = [self._extract_url_result(post) for post in series['posts']]
         series_title = None
         series_description = None

@@ -414,7 +414,7 @@ class RadioFranceProgramScheduleIE(RadioFranceBaseIE):
     _VALID_URL = rf'''(?x)
         {RadioFranceBaseIE._VALID_URL_BASE}
         /(?P<station>{RadioFranceBaseIE._STATIONS_RE})
-        /grille-programmes(?:\?date=(?P<date>[\d-]+))?
+        /grille-programmes
     '''

     _TESTS = [{

@@ -463,7 +463,7 @@ class RadioFranceProgramScheduleIE(RadioFranceBaseIE):
         }))

     def _real_extract(self, url):
-        station, date = self._match_valid_url(url).group('station', 'date')
+        station = self._match_valid_url(url).group('station')
         webpage = self._download_webpage(url, station)
         grid_data = self._extract_data_from_webpage(webpage, station, 'grid')
         upload_date = strftime_or_none(grid_data.get('date'), '%Y%m%d')

@@ -321,7 +321,7 @@ class RCTIPlusSeriesIE(RCTIPlusBaseIE):
                 f'Only {video_type} will be downloaded. '
                 f'To download everything from the series, remove "/{video_type}" from the URL')

-        series_meta, meta_paths = self._call_api(
+        series_meta, _ = self._call_api(
             f'https://api.rctiplus.com/api/v1/program/{series_id}/detail', display_id, 'Downloading series metadata')
         metadata = {
             'age_limit': try_get(series_meta, lambda x: self._AGE_RATINGS[x['age_restriction'][0]['code']]),

@@ -12,7 +12,7 @@ from ..utils.traversal import traverse_obj


 class SubstackIE(InfoExtractor):
-    _VALID_URL = r'https?://(?P<username>[\w-]+)\.substack\.com/p/(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://[\w-]+\.substack\.com/p/(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r',
         'md5': 'f27e4fc6252001d48d479f45e65cdfd5',

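Dropping a named group that nothing reads leaves the surviving groups untouched, which is why the existing test URLs keep matching. A quick standalone check (the URL is taken from the test above):

    import re

    OLD = r'https?://(?P<username>[\w-]+)\.substack\.com/p/(?P<id>[\w-]+)'
    NEW = r'https?://[\w-]+\.substack\.com/p/(?P<id>[\w-]+)'
    url = 'https://haleynahman.substack.com/p/i-made-a-vlog'
    # both patterns yield the same 'id' group
    assert re.match(OLD, url).group('id') == re.match(NEW, url).group('id')
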
@@ -116,7 +116,7 @@ class SubstackIE(InfoExtractor):
         return formats, subtitles

     def _real_extract(self, url):
-        display_id, username = self._match_valid_url(url).group('id', 'username')
+        display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)

         webpage_info = self._parse_json(self._search_json(

@@ -723,7 +723,7 @@ class VKWallPostIE(VKBaseIE):
     def _unmask_url(self, mask_url, vk_id):
         if 'audio_api_unavailable' in mask_url:
             extra = mask_url.split('?extra=')[1].split('#')
-            func, base = self._decode(extra[1]).split(chr(11))
+            _, base = self._decode(extra[1]).split(chr(11))
             mask_url = list(self._decode(extra[0]))
             url_len = len(mask_url)
             indexes = [None] * url_len

@@ -2760,7 +2760,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         if max_depth == 1 and parent:
             return

-        max_comments, max_parents, max_replies, max_replies_per_thread, *_ = (
+        _max_comments, max_parents, max_replies, max_replies_per_thread, *_ = (
             int_or_none(p, default=sys.maxsize) for p in self._configuration_arg('max_comments') + [''] * 4)

         continuation = self._extract_continuation(root_continuation_data)

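The padded unpacking above guarantees four values even when the user passes fewer: appending [''] * 4 pads the argument list, int_or_none turns '' into the default, and *_ absorbs any overflow. A self-contained sketch of the same trick, using plain int() in place of yt-dlp's int_or_none helper:

    import sys

    def parse_limits(args):
        # pad so at least four entries exist; '' falls back to sys.maxsize
        _max_comments, max_parents, max_replies, max_replies_per_thread, *_ = (
            int(p) if p else sys.maxsize for p in args + [''] * 4)
        return max_parents, max_replies, max_replies_per_thread

    print(parse_limits(['100', '10']))  # -> (10, maxsize, maxsize)
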
@@ -476,7 +476,7 @@ class ZingMp3UserIE(ZingMp3BaseIE):

 class ZingMp3HubIE(ZingMp3BaseIE):
     IE_NAME = 'zingmp3:hub'
-    _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>hub)/(?P<regions>[^/]+)/(?P<id>[^\.]+)'
+    _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>hub)/[^/?#]+/(?P<id>[^./?#]+)'
     _TESTS = [{
         'url': 'https://zingmp3.vn/hub/Nhac-Moi/IWZ9Z0CA.html',
         'info_dict': {

@@ -496,7 +496,7 @@ class ZingMp3HubIE(ZingMp3BaseIE):
     }]

     def _real_extract(self, url):
-        song_id, regions, url_type = self._match_valid_url(url).group('id', 'regions', 'type')
+        song_id, url_type = self._match_valid_url(url).group('id', 'type')
         hub_detail = self._call_api(url_type, {'id': song_id})
         entries = self._parse_items(traverse_obj(hub_detail, (
             'sections', lambda _, v: v['sectionId'] == 'hub', 'items', ...)))

@@ -200,7 +200,7 @@ def wrap_request_errors(func):


 def _socket_connect(ip_addr, timeout, source_address):
-    af, socktype, proto, canonname, sa = ip_addr
+    af, socktype, proto, _canonname, sa = ip_addr
     sock = socket.socket(af, socktype, proto)
     try:
         if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:

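For context, ip_addr here is one entry from socket.getaddrinfo(), which yields 5-tuples of (family, type, proto, canonname, sockaddr); canonname is only populated when AI_CANONNAME is requested, so the underscore prefix is accurate. A small illustration (requires network access; the host is just an example):

    import socket

    for af, socktype, proto, _canonname, sa in socket.getaddrinfo(
            'example.com', 80, proto=socket.IPPROTO_TCP):
        print(af, sa)  # e.g. AddressFamily.AF_INET ('93.184.216.34', 80)
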
@@ -215,7 +215,7 @@ def _socket_connect(ip_addr, timeout, source_address):


 def create_socks_proxy_socket(dest_addr, proxy_args, proxy_ip_addr, timeout, source_address):
-    af, socktype, proto, canonname, sa = proxy_ip_addr
+    af, socktype, proto, _canonname, sa = proxy_ip_addr
     sock = sockssocket(af, socktype, proto)
     try:
         connect_proxy_args = proxy_args.copy()

@@ -4770,7 +4770,7 @@ def jwt_encode(payload_data, key, *, alg='HS256', headers=None):

 # can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
 def jwt_decode_hs256(jwt):
-    header_b64, payload_b64, signature_b64 = jwt.split('.')
+    _header_b64, payload_b64, _signature_b64 = jwt.split('.')
     # add trailing ='s that may have been stripped, superfluous ='s are ignored
     return json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))

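The decoder leans on two facts: base64 input must pad out to a multiple of four characters, and superfluous '=' padding is ignored, so blindly appending '===' always restores a stripped payload segment. A standalone sketch with a hand-built token (the signature part is a dummy and is not verified):

    import base64
    import json

    token = ('eyJhbGciOiJIUzI1NiJ9.'  # header  {"alg": "HS256"}
             'eyJzdWIiOiIxMjMifQ.'    # payload {"sub": "123"}
             'dummy-signature')       # not checked by this decoder
    _header_b64, payload_b64, _signature_b64 = token.split('.')
    print(json.loads(base64.urlsafe_b64decode(payload_b64 + '===')))  # {'sub': '123'}
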