[cleanup] Upgrade syntax

Using https://github.com/asottile/pyupgrade

1. `__future__` imports and `coding: utf-8` were removed
2. Files were rewritten with `pyupgrade --py36-plus --keep-percent-format`
3. f-strings were cherry-picked from `pyupgrade --py36-plus`

Extractors are left untouched (except for removing the headers) to avoid unnecessary merge conflicts
commit 86e5f3ed2e
parent f9934b9614
Author: pukkandan
Date:   2022-04-11 20:40:28 +05:30

1009 changed files with 375 additions and 3224 deletions
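
Every change in the hunks below is one of a handful of mechanical rewrites. A minimal before/after sketch of the main patterns (the patterns themselves are taken from the hunks; the surrounding scaffolding is illustrative):

```python
# Before: Python 2 compatible spellings
class TokenIterator(object):  # explicit `object` base class
    pass

exts = set(('mp4', 'webm'))   # set() wrapped around a tuple literal
option, suggestion = '--old-opt', '--new-opt'
msg = '%s is deprecated. Use %s instead' % (option, suggestion)
try:
    open('missing-file')
except (IOError, OSError):    # two spellings of the same exception
    pass

# After: what pyupgrade --py36-plus rewrites them to
class TokenIterator:
    pass

exts = {'mp4', 'webm'}        # set literal
msg = f'{option} is deprecated. Use {suggestion} instead'
try:
    open('missing-file')
except OSError:
    pass
```

These rewrites are behavior-preserving on Python 3: the explicit `object` base is implicit, `io.open` is the same function as the builtin `open`, `'utf-8'` is already the default argument of `str.encode()`, and `IOError` has been an alias of `OSError` since Python 3.3. The %-format to f-string rewrites come from the second, cherry-picked run, since `--keep-percent-format` leaves percent formatting alone.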


@@ -1,8 +1,4 @@
#!/usr/bin/env python3
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import datetime
@@ -165,7 +161,7 @@ if compat_os_name == 'nt':
import ctypes
class YoutubeDL(object):
class YoutubeDL:
"""YoutubeDL class.
YoutubeDL objects are the ones responsible of downloading the
@@ -501,7 +497,7 @@ class YoutubeDL(object):
care about HLS. (only for youtube)
"""
_NUMERIC_FIELDS = set((
_NUMERIC_FIELDS = {
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'timestamp', 'release_timestamp',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
@@ -509,7 +505,7 @@ class YoutubeDL(object):
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
))
}
_format_fields = {
# NB: Keep in sync with the docstring of extractor/common.py
@@ -576,7 +572,7 @@ class YoutubeDL(object):
def check_deprecated(param, option, suggestion):
if self.params.get(param) is not None:
self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
return True
return False
@@ -693,7 +689,7 @@ class YoutubeDL(object):
with locked_file(fn, 'r', encoding='utf-8') as archive_file:
for line in archive_file:
self.archive.add(line.strip())
except IOError as ioe:
except OSError as ioe:
if ioe.errno != errno.ENOENT:
raise
return False
@@ -990,11 +986,9 @@ class YoutubeDL(object):
outtmpl_dict.update({
k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items()
if outtmpl_dict.get(k) is None})
for key, val in outtmpl_dict.items():
for _, val in outtmpl_dict.items():
if isinstance(val, bytes):
self.report_warning(
'Parameter outtmpl is bytes, but should be a unicode string. '
'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
self.report_warning('Parameter outtmpl is bytes, but should be a unicode string')
return outtmpl_dict
def get_output_path(self, dir_type='', filename=None):
@@ -1013,7 +1007,7 @@ class YoutubeDL(object):
# '%%' intact for template dict substitution step. Working around
# with boundary-alike separator hack.
sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
# outtmpl should be expand_path'ed before template dict substitution
# because meta fields may contain env variables we don't want to
@@ -1173,7 +1167,7 @@ class YoutubeDL(object):
fmt = outer_mobj.group('format')
if fmt == 's' and value is not None and key in field_size_compat_map.keys():
fmt = '0{:d}d'.format(field_size_compat_map[key])
fmt = f'0{field_size_compat_map[key]:d}d'
value = default if value is None else value if replacement is None else replacement
@@ -1188,7 +1182,7 @@ class YoutubeDL(object):
value = map(str, variadic(value) if '#' in flags else [value])
value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
elif fmt[-1] == 'B': # bytes
value = f'%{str_fmt}'.encode('utf-8') % str(value).encode('utf-8')
value = f'%{str_fmt}'.encode() % str(value).encode('utf-8')
value, fmt = value.decode('utf-8', 'ignore'), 's'
elif fmt[-1] == 'U': # unicode normalized
value, fmt = unicodedata.normalize(
@@ -1301,7 +1295,7 @@ class YoutubeDL(object):
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
@@ -1765,14 +1759,14 @@ class YoutubeDL(object):
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
self.to_screen(f'[{ie_result["extractor"]}] playlist {playlist}: {msg % n_entries}')
failures = 0
max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
for i, entry_tuple in enumerate(entries, 1):
playlist_index, entry = entry_tuple
if 'playlist-index' in self.params.get('compat_opts', []):
playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
self.to_screen(f'[download] Downloading video {i} of {n_entries}')
# This __x_forwarded_for_ip thing is a bit ugly but requires
# minimal changes
if x_forwarded_for:
@@ -1940,7 +1934,7 @@ class YoutubeDL(object):
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
'{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
@@ -2044,7 +2038,7 @@ class YoutubeDL(object):
raise syntax_error('Expected a selector', start)
current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
else:
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
raise syntax_error(f'Operator not recognized: "{string}"', start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
@@ -2244,7 +2238,7 @@ class YoutubeDL(object):
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
class TokenIterator(object):
class TokenIterator:
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
@@ -2644,7 +2638,7 @@ class YoutubeDL(object):
if max_downloads_reached:
break
write_archive = set(f.get('__write_download_archive', False) for f in formats_to_download)
write_archive = {f.get('__write_download_archive', False) for f in formats_to_download}
assert write_archive.issubset({True, False, 'ignore'})
if True in write_archive and False not in write_archive:
self.record_download_archive(info_dict)
@@ -2712,7 +2706,7 @@ class YoutubeDL(object):
for lang in requested_langs:
formats = available_subs.get(lang)
if formats is None:
self.report_warning('%s subtitles not available for %s' % (lang, video_id))
self.report_warning(f'{lang} subtitles not available for {video_id}')
continue
for ext in formats_preference:
if ext == 'best':
@@ -2755,7 +2749,7 @@ class YoutubeDL(object):
tmpl = format_tmpl(tmpl)
self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
if self._ensure_dir_exists(filename):
with io.open(filename, 'a', encoding='utf-8') as f:
with open(filename, 'a', encoding='utf-8') as f:
f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
def __forced_printings(self, info_dict, filename, incomplete):
@@ -2920,11 +2914,11 @@ class YoutubeDL(object):
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
except (OSError, IOError):
except OSError:
self.report_error('Cannot write annotations file: ' + annofn)
return
@@ -2943,13 +2937,13 @@ class YoutubeDL(object):
return True
try:
self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
newline='\r\n' if link_type == 'url' else '\n') as linkfile:
with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
newline='\r\n' if link_type == 'url' else '\n') as linkfile:
template_vars = {'url': url}
if link_type == 'desktop':
template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
except (OSError, IOError):
except OSError:
self.report_error(f'Cannot write internet shortcut {linkfn}')
return False
return True
@@ -3014,10 +3008,10 @@ class YoutubeDL(object):
return False
# Check extension
exts = set(format.get('ext') for format in formats)
exts = {format.get('ext') for format in formats}
COMPATIBLE_EXTS = (
set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
set(('webm',)),
{'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
{'webm'},
)
for ext_sets in COMPATIBLE_EXTS:
if ext_sets.issuperset(exts):
@@ -3050,7 +3044,7 @@ class YoutubeDL(object):
os.path.splitext(filename)[0]
if filename_real_ext in (old_ext, new_ext)
else filename)
return '%s.%s' % (filename_wo_ext, ext)
return f'{filename_wo_ext}.{ext}'
# Ensure filename always has a correct extension for successful merge
full_filename = correct_ext(full_filename)
@@ -3135,10 +3129,10 @@ class YoutubeDL(object):
except network_exceptions as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
except (OSError, IOError) as err:
except OSError as err:
raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err:
self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
return
if success and full_filename != '-':
@@ -3343,7 +3337,7 @@ class YoutubeDL(object):
self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
try:
os.remove(encodeFilename(old_filename))
except (IOError, OSError):
except OSError:
self.report_warning('Unable to remove downloaded original file')
if old_filename in infodict['__files_to_move']:
del infodict['__files_to_move'][old_filename]
@@ -3388,7 +3382,7 @@ class YoutubeDL(object):
break
else:
return
return '%s %s' % (extractor.lower(), video_id)
return f'{extractor.lower()} {video_id}'
def in_download_archive(self, info_dict):
fn = self.params.get('download_archive')
@@ -3791,7 +3785,7 @@ class YoutubeDL(object):
try:
write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
return True
except (OSError, IOError):
except OSError:
self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
return None
@@ -3812,9 +3806,9 @@ class YoutubeDL(object):
else:
try:
self.to_screen(f'[info] Writing {label} description to: {descfn}')
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(ie_result['description'])
except (OSError, IOError):
except OSError:
self.report_error(f'Cannot write {label} description file {descfn}')
return None
return True
@@ -3848,12 +3842,12 @@ class YoutubeDL(object):
try:
# Use newline='' to prevent conversion of newline characters
# See https://github.com/ytdl-org/youtube-dl/issues/10268
with io.open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_info['data'])
sub_info['filepath'] = sub_filename
ret.append((sub_filename, sub_filename_final))
continue
except (OSError, IOError):
except OSError:
self.report_error(f'Cannot write video subtitles file {sub_filename}')
return None


@@ -1,11 +1,8 @@
#!/usr/bin/env python3
# coding: utf-8
f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp' # noqa: F541
__license__ = 'Public Domain'
import io
import itertools
import os
import random
@@ -67,13 +64,12 @@ def get_urls(urls, batchfile, verbose):
'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'))
batchfd = sys.stdin
else:
batchfd = io.open(
expand_path(batchfile),
'r', encoding='utf-8', errors='ignore')
batchfd = open(
expand_path(batchfile), encoding='utf-8', errors='ignore')
batch_urls = read_batch_urls(batchfd)
if verbose:
write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
except IOError:
except OSError:
sys.exit('ERROR: batch file %s could not be read' % batchfile)
_enc = preferredencoding()
return [

View File

@@ -1,6 +1,4 @@
#!/usr/bin/env python3
from __future__ import unicode_literals
# Execute with
# $ python -m yt_dlp


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
from math import ceil
from .compat import (


@@ -1,7 +1,4 @@
from __future__ import unicode_literals
import errno
import io
import json
import os
import re
@@ -15,7 +12,7 @@ from .utils import (
)
class Cache(object):
class Cache:
def __init__(self, ydl):
self._ydl = ydl
@@ -31,7 +28,7 @@ class Cache(object):
'invalid section %r' % section
assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
return os.path.join(
self._get_root_dir(), section, '%s.%s' % (key, dtype))
self._get_root_dir(), section, f'{key}.{dtype}')
@property
def enabled(self):
@@ -54,8 +51,7 @@ class Cache(object):
write_json_file(data, fn)
except Exception:
tb = traceback.format_exc()
self._ydl.report_warning(
'Writing cache to %r failed: %s' % (fn, tb))
self._ydl.report_warning(f'Writing cache to {fn!r} failed: {tb}')
def load(self, section, key, dtype='json', default=None):
assert dtype in ('json',)
@@ -66,17 +62,16 @@ class Cache(object):
cache_fn = self._get_cache_fn(section, key, dtype)
try:
try:
with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
with open(cache_fn, encoding='utf-8') as cachef:
self._ydl.write_debug(f'Loading {section}.{key} from cache')
return json.load(cachef)
except ValueError:
try:
file_size = os.path.getsize(cache_fn)
except (OSError, IOError) as oe:
except OSError as oe:
file_size = str(oe)
self._ydl.report_warning(
'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
except IOError:
self._ydl.report_warning(f'Cache retrieval from {cache_fn} failed ({file_size})')
except OSError:
pass # No cache available
return default


@@ -1,5 +1,3 @@
# coding: utf-8
import asyncio
import base64
import collections


@@ -125,7 +125,7 @@ def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(),
elif browser_name in CHROMIUM_BASED_BROWSERS:
return _extract_chrome_cookies(browser_name, profile, keyring, logger)
else:
raise ValueError('unknown browser: {}'.format(browser_name))
raise ValueError(f'unknown browser: {browser_name}')
def _extract_firefox_cookies(profile, logger):
@@ -144,8 +144,8 @@ def _extract_firefox_cookies(profile, logger):
cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger)
if cookie_database_path is None:
raise FileNotFoundError('could not find firefox cookies database in {}'.format(search_root))
logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))
raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
@@ -164,7 +164,7 @@ def _extract_firefox_cookies(profile, logger):
path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
logger.info('Extracted {} cookies from firefox'.format(len(jar)))
logger.info(f'Extracted {len(jar)} cookies from firefox')
return jar
finally:
if cursor is not None:
@@ -179,7 +179,7 @@ def _firefox_browser_dir():
elif sys.platform == 'darwin':
return os.path.expanduser('~/Library/Application Support/Firefox')
else:
raise ValueError('unsupported platform: {}'.format(sys.platform))
raise ValueError(f'unsupported platform: {sys.platform}')
def _get_chromium_based_browser_settings(browser_name):
@@ -219,7 +219,7 @@ def _get_chromium_based_browser_settings(browser_name):
}[browser_name]
else:
raise ValueError('unsupported platform: {}'.format(sys.platform))
raise ValueError(f'unsupported platform: {sys.platform}')
# Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
@@ -242,7 +242,7 @@ def _get_chromium_based_browser_settings(browser_name):
def _extract_chrome_cookies(browser_name, profile, keyring, logger):
logger.info('Extracting cookies from {}'.format(browser_name))
logger.info(f'Extracting cookies from {browser_name}')
if not SQLITE_AVAILABLE:
logger.warning(('Cannot extract cookies from {} without sqlite3 support. '
@@ -260,13 +260,13 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
if config['supports_profiles']:
search_root = os.path.join(config['browser_dir'], profile)
else:
logger.error('{} does not support profiles'.format(browser_name))
logger.error(f'{browser_name} does not support profiles')
search_root = config['browser_dir']
cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger)
if cookie_database_path is None:
raise FileNotFoundError('could not find {} cookies database in "{}"'.format(browser_name, search_root))
logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))
raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)
@@ -295,13 +295,13 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
unencrypted_cookies += 1
jar.set_cookie(cookie)
if failed_cookies > 0:
failed_message = ' ({} could not be decrypted)'.format(failed_cookies)
failed_message = f' ({failed_cookies} could not be decrypted)'
else:
failed_message = ''
logger.info('Extracted {} cookies from {}{}'.format(len(jar), browser_name, failed_message))
logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}')
counts = decryptor.cookie_counts.copy()
counts['unencrypted'] = unencrypted_cookies
logger.debug('cookie version breakdown: {}'.format(counts))
logger.debug(f'cookie version breakdown: {counts}')
return jar
finally:
if cursor is not None:
@@ -492,7 +492,7 @@ def _extract_safari_cookies(profile, logger):
if profile is not None:
logger.error('safari does not support profiles')
if sys.platform != 'darwin':
raise ValueError('unsupported platform: {}'.format(sys.platform))
raise ValueError(f'unsupported platform: {sys.platform}')
cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies')
@@ -506,7 +506,7 @@ def _extract_safari_cookies(profile, logger):
cookies_data = f.read()
jar = parse_safari_cookies(cookies_data, logger=logger)
logger.info('Extracted {} cookies from safari'.format(len(jar)))
logger.info(f'Extracted {len(jar)} cookies from safari')
return jar
@@ -522,7 +522,7 @@ class DataParser:
def read_bytes(self, num_bytes):
if num_bytes < 0:
raise ParserError('invalid read of {} bytes'.format(num_bytes))
raise ParserError(f'invalid read of {num_bytes} bytes')
end = self.cursor + num_bytes
if end > len(self._data):
raise ParserError('reached end of input')
@@ -533,7 +533,7 @@ class DataParser:
def expect_bytes(self, expected_value, message):
value = self.read_bytes(len(expected_value))
if value != expected_value:
raise ParserError('unexpected value: {} != {} ({})'.format(value, expected_value, message))
raise ParserError(f'unexpected value: {value} != {expected_value} ({message})')
def read_uint(self, big_endian=False):
data_format = '>I' if big_endian else '<I'
@@ -557,7 +557,7 @@ class DataParser:
self._logger.debug('skipping {} bytes ({}): {}'.format(
num_bytes, description, self.read_bytes(num_bytes)))
elif num_bytes < 0:
raise ParserError('invalid skip of {} bytes'.format(num_bytes))
raise ParserError(f'invalid skip of {num_bytes} bytes')
def skip_to(self, offset, description='unknown'):
self.skip(offset - self.cursor, description)
@@ -584,7 +584,7 @@ def _parse_safari_cookies_page(data, jar, logger):
number_of_cookies = p.read_uint()
record_offsets = [p.read_uint() for _ in range(number_of_cookies)]
if number_of_cookies == 0:
logger.debug('a cookies page of size {} has no cookies'.format(len(data)))
logger.debug(f'a cookies page of size {len(data)} has no cookies')
return
p.skip_to(record_offsets[0], 'unknown page header field')
@@ -730,7 +730,7 @@ def _choose_linux_keyring(logger):
SelectBackend
"""
desktop_environment = _get_linux_desktop_environment(os.environ)
logger.debug('detected desktop environment: {}'.format(desktop_environment.name))
logger.debug(f'detected desktop environment: {desktop_environment.name}')
if desktop_environment == _LinuxDesktopEnvironment.KDE:
linux_keyring = _LinuxKeyring.KWALLET
elif desktop_environment == _LinuxDesktopEnvironment.OTHER:
@@ -764,10 +764,10 @@ def _get_kwallet_network_wallet(logger):
return default_wallet
else:
network_wallet = stdout.decode('utf-8').strip()
logger.debug('NetworkWallet = "{}"'.format(network_wallet))
logger.debug(f'NetworkWallet = "{network_wallet}"')
return network_wallet
except Exception as e:
logger.warning('exception while obtaining NetworkWallet: {}'.format(e))
logger.warning(f'exception while obtaining NetworkWallet: {e}')
return default_wallet
@@ -785,8 +785,8 @@ def _get_kwallet_password(browser_keyring_name, logger):
try:
proc = Popen([
'kwallet-query',
'--read-password', '{} Safe Storage'.format(browser_keyring_name),
'--folder', '{} Keys'.format(browser_keyring_name),
'--read-password', f'{browser_keyring_name} Safe Storage',
'--folder', f'{browser_keyring_name} Keys',
network_wallet
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
@@ -818,7 +818,7 @@ def _get_kwallet_password(browser_keyring_name, logger):
def _get_gnome_keyring_password(browser_keyring_name, logger):
if not SECRETSTORAGE_AVAILABLE:
logger.error('secretstorage not available {}'.format(SECRETSTORAGE_UNAVAILABLE_REASON))
logger.error(f'secretstorage not available {SECRETSTORAGE_UNAVAILABLE_REASON}')
return b''
# the Gnome keyring does not seem to organise keys in the same way as KWallet,
# using `dbus-monitor` during startup, it can be observed that chromium lists all keys
@@ -827,7 +827,7 @@ def _get_gnome_keyring_password(browser_keyring_name, logger):
with contextlib.closing(secretstorage.dbus_init()) as con:
col = secretstorage.get_default_collection(con)
for item in col.get_all_items():
if item.get_label() == '{} Safe Storage'.format(browser_keyring_name):
if item.get_label() == f'{browser_keyring_name} Safe Storage':
return item.get_secret()
else:
logger.error('failed to read from keyring')
@@ -861,7 +861,7 @@ def _get_mac_keyring_password(browser_keyring_name, logger):
['security', 'find-generic-password',
'-w', # write password to stdout
'-a', browser_keyring_name, # match 'account'
'-s', '{} Safe Storage'.format(browser_keyring_name)], # match 'service'
'-s', f'{browser_keyring_name} Safe Storage'], # match 'service'
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
@@ -879,7 +879,7 @@ def _get_windows_v10_key(browser_root, logger):
logger.error('could not find local state file')
return None
logger.debug(f'Found local state file at "{path}"')
with open(path, 'r', encoding='utf8') as f:
with open(path, encoding='utf8') as f:
data = json.load(f)
try:
base64_key = data['os_crypt']['encrypted_key']
@@ -966,7 +966,7 @@ def _open_database_copy(database_path, tmpdir):
def _get_column_names(cursor, table_name):
table_info = cursor.execute('PRAGMA table_info({})'.format(table_name)).fetchall()
table_info = cursor.execute(f'PRAGMA table_info({table_name})').fetchall()
return [row[1].decode('utf-8') for row in table_info]


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
from ..compat import compat_str
from ..utils import (
determine_protocol,


@@ -1,5 +1,3 @@
from __future__ import division, unicode_literals
import os
import re
import time
@@ -25,7 +23,7 @@ from ..minicurses import (
)
class FileDownloader(object):
class FileDownloader:
"""File Downloader class.
File downloader objects are the ones responsible of downloading the
@@ -219,7 +217,7 @@ class FileDownloader(object):
while True:
try:
return func(self, *args, **kwargs)
except (IOError, OSError) as err:
except OSError as err:
retry = retry + 1
if retry > file_access_retries or err.errno not in (errno.EACCES, errno.EINVAL):
if not fatal:
@@ -486,4 +484,4 @@ class FileDownloader(object):
if exe is None:
exe = os.path.basename(str_args[0])
self.write_debug('%s command line: %s' % (exe, shell_quote(str_args)))
self.write_debug(f'{exe} command line: {shell_quote(str_args)}')


@@ -1,4 +1,3 @@
from __future__ import unicode_literals
import time
from ..downloader import get_suitable_downloader
@@ -46,7 +45,7 @@ class DashSegmentsFD(FragmentFD):
if real_downloader:
self.to_screen(
'[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')
info_dict['fragments'] = list(fragments_to_download)
fd = real_downloader(self.ydl, self.params)
return fd.real_download(filename, info_dict)


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import os.path
import re
import subprocess
@@ -56,7 +54,7 @@ class ExternalFD(FragmentFD):
}
if filename != '-':
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize))
self.to_screen(f'\r[{self.get_basename()}] Downloaded {fsize} bytes')
self.try_rename(tmpfilename, filename)
status.update({
'downloaded_bytes': fsize,
@@ -157,7 +155,7 @@ class ExternalFD(FragmentFD):
fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
try:
src, _ = self.sanitize_open(fragment_filename, 'rb')
except IOError as err:
except OSError as err:
if skip_unavailable_fragments and frag_index > 1:
self.report_skip_fragment(frag_index, err)
continue
@@ -179,7 +177,7 @@ class CurlFD(ExternalFD):
cmd = [self.exe, '--location', '-o', tmpfilename, '--compressed']
if info_dict.get('http_headers') is not None:
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
cmd += ['--header', f'{key}: {val}']
cmd += self._bool_option('--continue-at', 'continuedl', '-', '0')
cmd += self._valueless_option('--silent', 'noprogress')
@@ -216,7 +214,7 @@ class AxelFD(ExternalFD):
cmd = [self.exe, '-o', tmpfilename]
if info_dict.get('http_headers') is not None:
for key, val in info_dict['http_headers'].items():
cmd += ['-H', '%s: %s' % (key, val)]
cmd += ['-H', f'{key}: {val}']
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
@@ -229,7 +227,7 @@ class WgetFD(ExternalFD):
cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies', '--compression=auto']
if info_dict.get('http_headers') is not None:
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
cmd += ['--header', f'{key}: {val}']
cmd += self._option('--limit-rate', 'ratelimit')
retry = self._option('--tries', 'retries')
if len(retry) == 2:
@@ -240,7 +238,7 @@ class WgetFD(ExternalFD):
proxy = self.params.get('proxy')
if proxy:
for var in ('http_proxy', 'https_proxy'):
cmd += ['--execute', '%s=%s' % (var, proxy)]
cmd += ['--execute', f'{var}={proxy}']
cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
@@ -271,7 +269,7 @@ class Aria2cFD(ExternalFD):
if info_dict.get('http_headers') is not None:
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
cmd += ['--header', f'{key}: {val}']
cmd += self._option('--max-overall-download-limit', 'ratelimit')
cmd += self._option('--interface', 'source_address')
cmd += self._option('--all-proxy', 'proxy')
@@ -289,10 +287,10 @@ class Aria2cFD(ExternalFD):
dn = os.path.dirname(tmpfilename)
if dn:
if not os.path.isabs(dn):
dn = '.%s%s' % (os.path.sep, dn)
dn = f'.{os.path.sep}{dn}'
cmd += ['--dir', dn + os.path.sep]
if 'fragments' not in info_dict:
cmd += ['--out', '.%s%s' % (os.path.sep, os.path.basename(tmpfilename))]
cmd += ['--out', f'.{os.path.sep}{os.path.basename(tmpfilename)}']
cmd += ['--auto-file-renaming=false']
if 'fragments' in info_dict:
@@ -320,7 +318,7 @@ class HttpieFD(ExternalFD):
if info_dict.get('http_headers') is not None:
for key, val in info_dict['http_headers'].items():
cmd += ['%s:%s' % (key, val)]
cmd += [f'{key}:{val}']
return cmd
@@ -393,7 +391,7 @@ class FFmpegFD(ExternalFD):
headers = handle_youtubedl_headers(info_dict['http_headers'])
args += [
'-headers',
''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
''.join(f'{key}: {val}\r\n' for key, val in headers.items())]
env = None
proxy = self.params.get('proxy')


@@ -1,5 +1,3 @@
from __future__ import division, unicode_literals
import io
import itertools
import time


@@ -1,5 +1,3 @@
from __future__ import division, unicode_literals
import threading
from .common import FileDownloader


@@ -1,5 +1,3 @@
from __future__ import division, unicode_literals
import http.client
import json
import math
@@ -172,8 +170,7 @@ class FragmentFD(FileDownloader):
total_frags_str += ' (not including %d ad)' % ad_frags
else:
total_frags_str = 'unknown (live)'
self.to_screen(
'[%s] Total fragments: %s' % (self.FD_NAME, total_frags_str))
self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')
self.report_destination(ctx['filename'])
dl = HttpQuietDownloader(
self.ydl,
@@ -342,8 +339,7 @@ class FragmentFD(FileDownloader):
total_frags_str += ' (not including %d ad)' % ad_frags
else:
total_frags_str = 'unknown (live)'
self.to_screen(
'[%s] Total fragments: %s' % (self.FD_NAME, total_frags_str))
self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}')
tmpfilename = self.temp_name(ctx['filename'])


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import re
import io
import binascii
@@ -102,8 +100,7 @@ class HlsFD(FragmentFD):
if real_downloader and not real_downloader.supports_manifest(s):
real_downloader = None
if real_downloader:
self.to_screen(
'[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
self.to_screen(f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')
def is_ad_fragment_start(s):
return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import os
import ssl
import time
@@ -221,10 +219,12 @@ class HttpFD(FileDownloader):
min_data_len = self.params.get('min_filesize')
max_data_len = self.params.get('max_filesize')
if min_data_len is not None and data_len < min_data_len:
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
self.to_screen(
f'\r[download] File is smaller than min-filesize ({data_len} bytes < {min_data_len} bytes). Aborting.')
return False
if max_data_len is not None and data_len > max_data_len:
self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
self.to_screen(
f'\r[download] File is larger than max-filesize ({data_len} bytes > {max_data_len} bytes). Aborting.')
return False
byte_counter = 0 + ctx.resume_len
@@ -265,7 +265,7 @@ class HttpFD(FileDownloader):
assert ctx.stream is not None
ctx.filename = self.undo_temp_name(ctx.tmpfilename)
self.report_destination(ctx.filename)
except (OSError, IOError) as err:
except OSError as err:
self.report_error('unable to open for writing: %s' % str(err))
return False
@@ -277,7 +277,7 @@ class HttpFD(FileDownloader):
try:
ctx.stream.write(data_block)
except (IOError, OSError) as err:
except OSError as err:
self.to_stderr('\n')
self.report_error('unable to write data: %s' % str(err))
return False


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import time
import binascii
import io


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import io
import quopri
import re


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import threading
from .common import FileDownloader


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import os
import re
import subprocess


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import os
import subprocess
@@ -32,7 +30,7 @@ class RtspFD(FileDownloader):
retval = subprocess.call(args)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
self.to_screen(f'\r[{args[0]}] {fsize} bytes')
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,


@@ -1,5 +1,3 @@
from __future__ import division, unicode_literals
import json
import time


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import hashlib
import hmac
import re


@@ -1,7 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .amp import AMPIE
from .common import InfoExtractor
from ..utils import (


@@ -1,7 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,7 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import json


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import json
import re
import time


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import functools
import re


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import json
from .turner import TurnerBaseIE


@@ -1,7 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .theplatform import ThePlatformIE
from ..utils import (
ExtractorError,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import functools
import re


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_iso8601,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .youtube import YoutubeIE
from .vimeo import VimeoIE


@@ -1,4 +1,3 @@
# coding: utf-8
from .common import InfoExtractor
from ..utils import int_or_none


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .theplatform import ThePlatformIE


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
import urllib.parse


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import base64
import hashlib
import json


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
from .nfl import NFLTokenGenerator
__all__ = [


@@ -1,6 +1,3 @@
from __future__ import unicode_literals
class TokenGenerator:
def generate(self, anvack, mcp_id):
raise NotImplementedError('This method must be implemented by subclasses')


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import json
from .common import TokenGenerator


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .yahoo import YahooIE


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
get_element_by_id,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
str_to_int,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import re
import json


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
@@ -479,7 +476,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
return self._parse_json(self._search_regex(
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
(fr'{regex}\s*{self._YT_INITIAL_BOUNDARY_RE}',
regex), webpage, name, default='{}'), video_id, fatal=False)
def _extract_webpage_title(self, webpage):
@@ -597,7 +594,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
response = self._call_cdx_api(
video_id, f'https://www.youtube.com/watch?v={video_id}',
filters=['mimetype:text/html'], collapse=['timestamp:6', 'digest'], query={'matchType': 'prefix'}) or []
all_captures = sorted([int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None])
all_captures = sorted(int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None)
# Prefer the new polymer UI captures as we support extracting more metadata from them
# WBM captures seem to all switch to this layout ~July 2020


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import json
import re


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import functools
import re


@@ -1,7 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import unified_strdate


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import datetime
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import itertools
import time


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import random
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import base64
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import datetime
import hashlib
import hmac


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor


@@ -1,7 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import unescapeHTML


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import math
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .brightcove import BrightcoveNewIE
from ..utils import extract_attributes


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import random
import re
import time


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
import json
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import xml.etree.ElementTree
import functools
import itertools


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (


@@ -1,7 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import url_basename


@@ -1,7 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
from ..utils import unified_strdate


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError, urlencode_postdata


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,


@@ -1,5 +1,3 @@
# coding: utf-8
import base64
import hashlib
import itertools


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .vk import VKIE
from ..compat import compat_b64decode


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re


@@ -1,5 +1,3 @@
from __future__ import unicode_literals
from .common import InfoExtractor


@@ -1,7 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import parse_iso8601


@@ -1,6 +1,3 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .amp import AMPIE
from ..utils import (

Some files were not shown because too many files have changed in this diff.