Merge branch 'main' into patch-17

tcely 2025-03-14 15:05:33 -04:00 committed by GitHub
commit 9683a22fae
13 changed files with 228 additions and 79 deletions

View File

@@ -28,7 +28,9 @@ jobs:
         pip install pipenv
         pipenv install --system --skip-lock
     - name: Set up Django environment
-      run: cp tubesync/tubesync/local_settings.py.example tubesync/tubesync/local_settings.py
+      run: |
+        cp -v -p tubesync/tubesync/local_settings.py.example tubesync/tubesync/local_settings.py
+        cp -v -a -t "${Python3_ROOT_DIR}"/lib/python3.*/site-packages/yt_dlp/ patches/yt_dlp/*
     - name: Run Django tests
       run: cd tubesync && python3 manage.py test --verbosity=2
   containerise:

View File

@@ -275,10 +275,14 @@ RUN --mount=type=cache,id=apt-lib-cache,sharing=locked,target=/var/lib/apt \
         pipenv \
         pkgconf \
         python3 \
+        python3-libsass \
+        python3-socks \
         python3-wheel \
         curl \
         less \
         && \
+    # Link to the current python3 version
+    ln -v -s -f -T "$(find /usr/local/lib -name 'python3.[0-9]*' -type d -printf '%P\n' | sort -r -V | head -n 1)" /usr/local/lib/python3 && \
     # Clean up
     apt-get -y autopurge && \
     apt-get -y autoclean && \

@@ -346,12 +350,18 @@ RUN --mount=type=tmpfs,target=/cache \
 COPY tubesync /app
 COPY tubesync/tubesync/local_settings.py.container /app/tubesync/local_settings.py
+# patch background_task
+COPY patches/background_task/ \
+    /usr/local/lib/python3/dist-packages/background_task/
+# patch yt_dlp
+COPY patches/yt_dlp/ \
+    /usr/local/lib/python3/dist-packages/yt_dlp/
 # Build app
 RUN set -x && \
     # Make absolutely sure we didn't accidentally bundle a SQLite dev database
     rm -rf /app/db.sqlite3 && \
+    # Check nginx configuration
+    nginx -t && \
     # Run any required app commands
     /usr/bin/python3 -B /app/manage.py compilescss && \
     /usr/bin/python3 -B /app/manage.py collectstatic --no-input --link && \

@@ -361,8 +371,6 @@ RUN set -x && \
     mkdir -v -p /config/cache/pycache && \
     mkdir -v -p /downloads/audio && \
     mkdir -v -p /downloads/video && \
-    # Link to the current python3 version
-    ln -v -s -f -T "$(find /usr/local/lib -name 'python3.[0-9]*' -type d -printf '%P\n' | sort -r -V | head -n 1)" /usr/local/lib/python3 && \
     # Append software versions
     ffmpeg_version=$(/usr/local/bin/ffmpeg -version | awk -v 'ev=31' '1 == NR && "ffmpeg" == $1 { print $3; ev=0; } END { exit ev; }') && \
     test -n "${ffmpeg_version}" && \

@@ -371,13 +379,8 @@ RUN set -x && \
 # Copy root
 COPY config/root /
-# patch background_task
-COPY patches/background_task/ \
-    /usr/local/lib/python3/dist-packages/background_task/
-# patch yt_dlp
-COPY patches/yt_dlp/ \
-    /usr/local/lib/python3/dist-packages/yt_dlp/
+# Check nginx configuration copied from config/root/etc
+RUN set -x && nginx -t
 # Create a healthcheck
 HEALTHCHECK --interval=1m --timeout=10s --start-period=3m CMD ["/app/healthcheck.py", "http://127.0.0.1:8080/healthcheck"]

Pipfile
View File

@@ -8,17 +8,19 @@ autopep8 = "*"
 [packages]
 django = "*"
-django-sass-processor = "*"
+django-sass-processor = {extras = ["management-command"], version = "*"}
+libsass = "*"
 pillow = "*"
 whitenoise = "*"
 gunicorn = "*"
-django-compressor = "*"
 httptools = "*"
 django-background-tasks = ">=1.2.8"
 django-basicauth = "*"
 psycopg2-binary = "*"
 mysqlclient = "*"
-yt-dlp = "*"
+PySocks = "*"
+urllib3 = {extras = ["socks"], version = "*"}
 requests = {extras = ["socks"], version = "*"}
+yt-dlp = "*"
 emoji = "*"
 brotli = "*"
 html5lib = "*"

View File

@@ -70,7 +70,7 @@ currently just Plex, to complete the PVR experience.
 TubeSync is designed to be run in a container, such as via Docker or Podman. It also
 works in a Docker Compose stack. `amd64` (most desktop PCs and servers) and `arm64`
-(modern ARM computers, such as the Rasperry Pi 3 or later) are supported.
+(modern ARM computers, such as the Raspberry Pi 3 or later) are supported.

 Example (with Docker on *nix):

@@ -356,7 +356,7 @@ etc.). Configuration of this is beyond the scope of this README.
 Only two are supported, for the moment:
 - `amd64` (most desktop PCs and servers)
 - `arm64`
-(modern ARM computers, such as the Rasperry Pi 3 or later)
+(modern ARM computers, such as the Raspberry Pi 3 or later)

 Others may be made available, if there is demand.

View File

@@ -0,0 +1,5 @@
+from yt_dlp.compat.compat_utils import passthrough_module
+
+passthrough_module(__name__, '.patch')
+del passthrough_module

View File

@@ -0,0 +1,43 @@
+from yt_dlp import YoutubeDL
+from yt_dlp.networking import HEADRequest
+from yt_dlp.utils import network_exceptions
+from yt_dlp.utils import sanitize_url, LazyList
+
+
+class PatchedYoutubeDL(YoutubeDL):
+
+    def _sanitize_thumbnails(self, info_dict):
+        thumbnails = info_dict.get('thumbnails')
+        if thumbnails is None:
+            thumbnail = info_dict.get('thumbnail')
+            if thumbnail:
+                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
+        if not thumbnails:
+            return
+
+        def check_thumbnails(thumbnails):
+            for t in thumbnails:
+                self.to_screen(f'[info] Testing thumbnail {t["id"]}: {t["url"]!r}')
+                try:
+                    self.urlopen(HEADRequest(t['url']))
+                except network_exceptions as err:
+                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
+                    continue
+                yield t
+
+        self._sort_thumbnails(thumbnails)
+        for i, t in enumerate(thumbnails):
+            if t.get('id') is None:
+                t['id'] = str(i)
+            if t.get('width') and t.get('height'):
+                t['resolution'] = '%dx%d' % (t['width'], t['height'])
+            t['url'] = sanitize_url(t['url'])
+
+        if self.params.get('check_thumbnails') is True:
+            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
+        else:
+            info_dict['thumbnails'] = thumbnails
+
+
+YoutubeDL.__unpatched___sanitize_thumbnails = YoutubeDL._sanitize_thumbnails
+YoutubeDL._sanitize_thumbnails = PatchedYoutubeDL._sanitize_thumbnails
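
The patch takes effect as a side effect of importing the module: the original method is kept on the class and the replacement is swapped in. A minimal sketch of how a caller opts in, assuming the files under patches/yt_dlp/ have been copied into the installed yt_dlp package (as the workflow and Dockerfile changes above do); the URL is a placeholder:

# Importing the patch module monkey-patches YoutubeDL._sanitize_thumbnails.
import yt_dlp
import yt_dlp.patch.check_thumbnails

# 'check_thumbnails' is a patch-specific option: False skips the HEAD request
# that would otherwise be issued to test every thumbnail URL during extraction.
with yt_dlp.YoutubeDL({'check_thumbnails': False}) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=EXAMPLE', download=False)  # placeholder URL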

View File

@ -1,6 +1,7 @@
import cProfile import cProfile
import emoji import emoji
import io import io
import os
import pstats import pstats
import string import string
import time import time
@ -10,6 +11,41 @@ from yt_dlp.utils import LazyList
from .errors import DatabaseConnectionError from .errors import DatabaseConnectionError
def getenv(key, default=None, /, *, integer=False, string=True):
'''
Guarantees a returned type from calling `os.getenv`
The caller can request the integer type,
or use the default string type.
'''
args = dict(key=key, default=default, integer=integer, string=string)
supported_types = dict(zip(args.keys(), (
(str,), # key
(
bool,
float,
int,
str,
None.__class__,
), # default
(bool,) * (len(args.keys()) - 2),
)))
unsupported_type_msg = 'Unsupported type for positional argument, "{}": {}'
for k, t in supported_types.items():
v = args[k]
assert isinstance(v, t), unsupported_type_msg.format(k, type(v))
d = str(default) if default is not None else None
r = os.getenv(key, d)
if r is None:
if string: r = str()
if integer: r = int()
elif integer:
r = int(float(r))
return r
def parse_database_connection_string(database_connection_string): def parse_database_connection_string(database_connection_string):
''' '''
Parses a connection string in a URL style format, such as: Parses a connection string in a URL style format, such as:
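
The new helper wraps os.getenv so callers always receive a str (or an int when integer=True) instead of None, which is what the settings changes further below rely on. A short usage sketch; the variable names and environment values are illustrative:

# Illustrative use of the getenv() helper added above.
import os
from common.utils import getenv

os.environ['TUBESYNC_WORKERS'] = '4'
workers = getenv('TUBESYNC_WORKERS', 1, integer=True)   # -> 4 (int)
hosts = getenv('TUBESYNC_HOSTS', '*')                    # -> '*' (str) when the variable is unset
missing = getenv('TUBESYNC_UNSET_EXAMPLE')               # -> '' (str), never None
as_int = getenv('TUBESYNC_UNSET_EXAMPLE', integer=True)  # -> 0 (int)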

View File

@@ -1507,17 +1507,35 @@ class Media(models.Model):
     def calculate_episode_number(self):
         if self.source.is_playlist:
-            sorted_media = Media.objects.filter(source=self.source)
+            sorted_media = Media.objects.filter(
+                source=self.source,
+                metadata__isnull=False,
+            ).order_by(
+                'published',
+                'created',
+                'key',
+            )
         else:
-            self_year = self.upload_date.year if self.upload_date else self.created.year
-            filtered_media = Media.objects.filter(source=self.source, published__year=self_year)
-            filtered_media = [m for m in filtered_media if m.upload_date is not None]
-            sorted_media = sorted(filtered_media, key=lambda x: (x.upload_date, x.key))
-        position_counter = 1
-        for media in sorted_media:
+            self_year = self.created.year # unlikely to be accurate
+            if self.published:
+                self_year = self.published.year
+            elif self.has_metadata and self.upload_date:
+                self_year = self.upload_date.year
+            elif self.download_date:
+                # also, unlikely to be accurate
+                self_year = self.download_date.year
+            sorted_media = Media.objects.filter(
+                source=self.source,
+                metadata__isnull=False,
+                published__year=self_year,
+            ).order_by(
+                'published',
+                'created',
+                'key',
+            )
+        for counter, media in enumerate(sorted_media, start=1):
             if media == self:
-                return position_counter
-            position_counter += 1
+                return counter

     def get_episode_str(self, use_padding=False):
         episode_number = self.calculate_episode_number()
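
The rewritten method hands ordering to the database and replaces the manual counter with enumerate(). A minimal sketch of that counting pattern, with a plain list standing in for the ordered queryset (the keys are illustrative; the real query orders by published, created, then key and excludes rows without metadata):

# Sketch of the new numbering logic with a plain list in place of the queryset.
sorted_media = ['key-a', 'key-b', 'key-c']
target = 'key-b'
for counter, media in enumerate(sorted_media, start=1):
    if media == target:
        print(counter)  # -> 2; the method returns None implicitly when nothing matches
        break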

View File

@@ -43,6 +43,8 @@ def source_pre_save(sender, instance, **kwargs):
         work_directory = existing_dirpath
         for _count in range(parents_count, 0, -1):
             work_directory = work_directory.parent
+        if not Path(work_directory).resolve(strict=True).is_relative_to(Path(settings.DOWNLOAD_ROOT)):
+            work_directory = Path(settings.DOWNLOAD_ROOT)
         with TemporaryDirectory(suffix=('.'+new_dirpath.name), prefix='.tmp.', dir=work_directory) as tmp_dir:
             tmp_dirpath = Path(tmp_dir)
             existed = None
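
The added guard keeps the temporary working directory inside DOWNLOAD_ROOT. A minimal sketch of the pathlib containment check (Path.is_relative_to, Python 3.9+) with illustrative paths; the change above resolves with strict=True, which additionally requires the path to exist:

# Sketch of the containment check used above (paths are illustrative).
from pathlib import Path

download_root = Path('/downloads')
candidate = Path('/downloads/tv/../../tmp')          # escapes the root once resolved
if not candidate.resolve().is_relative_to(download_root):
    candidate = download_root                        # fall back to DOWNLOAD_ROOT
print(candidate)  # -> /downloads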
@@ -129,7 +131,7 @@ def source_post_save(sender, instance, created, **kwargs):
         verbose_name = _('Checking all media for source "{}"')
         save_all_media_for_source(
             str(instance.pk),
-            priority=9,
+            priority=25,
             verbose_name=verbose_name.format(instance.name),
             remove_existing_tasks=True
         )

@@ -167,6 +169,7 @@ def task_task_failed(sender, task_id, completed_task, **kwargs):
 @receiver(post_save, sender=Media)
 def media_post_save(sender, instance, created, **kwargs):
+    media = instance
     # If the media is skipped manually, bail.
     if instance.manual_skip:
         return

@@ -176,12 +179,27 @@ def media_post_save(sender, instance, created, **kwargs):
     # Reset the skip flag if the download cap has changed if the media has not
     # already been downloaded
     downloaded = instance.downloaded
+    existing_media_metadata_task = get_media_metadata_task(str(instance.pk))
+    existing_media_download_task = get_media_download_task(str(instance.pk))
     if not downloaded:
-        skip_changed = filter_media(instance)
+        # the decision to download was already made if a download task exists
+        if not existing_media_download_task:
+            # Recalculate the "can_download" flag, this may
+            # need to change if the source specifications have been changed
+            if instance.metadata:
+                if instance.get_format_str():
+                    if not instance.can_download:
+                        instance.can_download = True
+                        can_download_changed = True
+                else:
+                    if instance.can_download:
+                        instance.can_download = False
+                        can_download_changed = True
+            # Recalculate the "skip_changed" flag
+            skip_changed = filter_media(instance)
     else:
         # Downloaded media might need to be renamed
         # Check settings before any rename tasks are scheduled
-        media = instance
         rename_sources_setting = settings.RENAME_SOURCES or list()
         create_rename_task = (
             (

@@ -195,23 +213,11 @@ def media_post_save(sender, instance, created, **kwargs):
             rename_media(
                 str(media.pk),
                 queue=str(media.pk),
-                priority=16,
+                priority=20,
                 verbose_name=verbose_name.format(media.key, media.name),
                 remove_existing_tasks=True
             )
-    # Recalculate the "can_download" flag, this may
-    # need to change if the source specifications have been changed
-    if instance.metadata:
-        if instance.get_format_str():
-            if not instance.can_download:
-                instance.can_download = True
-                can_download_changed = True
-        else:
-            if instance.can_download:
-                instance.can_download = False
-                can_download_changed = True
-    existing_media_metadata_task = get_media_metadata_task(str(instance.pk))
     # If the media is missing metadata schedule it to be downloaded
     if not (instance.skip or instance.metadata or existing_media_metadata_task):
         log.info(f'Scheduling task to download metadata for: {instance.url}')

@@ -239,7 +245,6 @@ def media_post_save(sender, instance, created, **kwargs):
             verbose_name=verbose_name.format(instance.name),
             remove_existing_tasks=True
         )
-    existing_media_download_task = get_media_download_task(str(instance.pk))
     # If the media has not yet been downloaded schedule it to be downloaded
     if not (instance.media_file_exists or instance.filepath.exists() or existing_media_download_task):
         # The file was deleted after it was downloaded, skip this media.

View File

@@ -17,6 +17,7 @@ from django.conf import settings
 from django.core.files.base import ContentFile
 from django.core.files.uploadedfile import SimpleUploadedFile
 from django.utils import timezone
+from django.db.transaction import atomic
 from django.db.utils import IntegrityError
 from django.utils.translation import gettext_lazy as _
 from background_task import background

@@ -179,6 +180,7 @@ def cleanup_removed_media(source, videos):
 @background(schedule=300, remove_existing_tasks=True)
+@atomic(durable=True)
 def index_source_task(source_id):
     '''
     Indexes media available from a Source object.

@@ -221,7 +223,8 @@ def index_source_task(source_id):
         if published_dt is not None:
             media.published = published_dt
         try:
-            media.save()
+            with atomic():
+                media.save()
             log.debug(f'Indexed media: {source} / {media}')
             # log the new media instances
             new_media_instance = (

@@ -231,6 +234,13 @@ def index_source_task(source_id):
             )
             if new_media_instance:
                 log.info(f'Indexed new media: {source} / {media}')
+                log.info(f'Scheduling task to download metadata for: {media.url}')
+                verbose_name = _('Downloading metadata for "{}"')
+                download_media_metadata(
+                    str(media.pk),
+                    priority=9,
+                    verbose_name=verbose_name.format(media.pk),
+                )
         except IntegrityError as e:
             log.error(f'Index media failed: {source} / {media} with "{e}"')
     # Tack on a cleanup of old completed tasks

@@ -611,9 +621,10 @@ def save_all_media_for_source(source_id):
     # Trigger the post_save signal for each media item linked to this source as various
     # flags may need to be recalculated
-    for media in mqs:
-        if media.uuid not in already_saved:
-            media.save()
+    with atomic():
+        for media in mqs:
+            if media.uuid not in already_saved:
+                media.save()


 @background(schedule=60, remove_existing_tasks=True)

@@ -626,6 +637,7 @@ def rename_media(media_id):
 @background(schedule=300, remove_existing_tasks=True)
+@atomic(durable=True)
 def rename_all_media_for_source(source_id):
     try:
         source = Source.objects.get(pk=source_id)

@@ -653,7 +665,8 @@ def rename_all_media_for_source(source_id):
         downloaded=True,
     )
     for media in mqs:
-        with atomic():
-            media.rename_files()
+        with atomic():
+            media.rename_files()


 @background(schedule=60, remove_existing_tasks=True)
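
The tasks above now run bulk work inside a durable outer transaction while giving each item its own savepoint, using Django's transaction.atomic both as a decorator and as a context manager. A minimal sketch of the same pattern with an illustrative task body (the model loop and exception handling stand in for the real task code):

# Sketch of the transaction pattern used above.
from django.db.transaction import atomic
from django.db.utils import IntegrityError

@atomic(durable=True)              # durable: refuses to run nested inside another transaction
def process_items(items):
    for item in items:
        try:
            with atomic():         # savepoint: a failure rolls back only this item
                item.save()
        except IntegrityError:
            pass                   # the outer transaction stays usable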

View File

@@ -17,6 +17,7 @@ from django.conf import settings
 from .hooks import postprocessor_hook, progress_hook
 from .utils import mkdir_p
 import yt_dlp
+import yt_dlp.patch.check_thumbnails
 from yt_dlp.utils import remove_end

@@ -146,6 +147,14 @@ def get_media_info(url, days=None):
         f'yesterday-{days!s}days' if days else None
     )
     opts = get_yt_opts()
+    paths = opts.get('paths', dict())
+    if 'temp' in paths:
+        temp_dir_obj = TemporaryDirectory(prefix='.yt_dlp-', dir=paths['temp'])
+        temp_dir_path = Path(temp_dir_obj.name)
+        (temp_dir_path / '.ignore').touch(exist_ok=True)
+        paths.update({
+            'temp': str(temp_dir_path),
+        })
     opts.update({
         'ignoreerrors': False,  # explicitly set this to catch exceptions
         'ignore_no_formats_error': False,  # we must fail first to try again with this enabled

@@ -154,12 +163,17 @@ def get_media_info(url, days=None):
         'logger': log,
         'extract_flat': True,
         'check_formats': True,
+        'check_thumbnails': False,
         'daterange': yt_dlp.utils.DateRange(start=start),
         'extractor_args': {
+            'youtube': {'formats': ['missing_pot']},
             'youtubetab': {'approximate_date': ['true']},
         },
+        'paths': paths,
+        'sleep_interval_requests': 2,
+        'verbose': True if settings.DEBUG else False,
     })
+    if start:
+        log.debug(f'get_media_info: used date range: {opts["daterange"]} for URL: {url}')
     response = {}
     with yt_dlp.YoutubeDL(opts) as y:
         try:
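
get_media_info() now routes yt-dlp's intermediate files into a disposable per-call directory via the 'paths' option. A minimal sketch of that option in isolation; the directory prefix mirrors the change above, the '.ignore' marker matches the file the change creates, and the URL is a placeholder:

# Sketch of yt-dlp's 'paths' option with a throwaway temp directory.
from pathlib import Path
from tempfile import TemporaryDirectory

import yt_dlp

with TemporaryDirectory(prefix='.yt_dlp-') as temp_dir:
    (Path(temp_dir) / '.ignore').touch(exist_ok=True)    # marker file, as in the change above
    opts = {'paths': {'temp': temp_dir}}                  # intermediate files land here
    with yt_dlp.YoutubeDL(opts) as ydl:
        ydl.extract_info('https://www.youtube.com/watch?v=EXAMPLE', download=False)  # placeholder URL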

View File

@@ -1,40 +1,41 @@
-import os
 import sys
 from pathlib import Path
 from urllib.parse import urljoin
-from common.utils import parse_database_connection_string
+from common.utils import getenv, parse_database_connection_string

 BASE_DIR = Path(__file__).resolve().parent.parent
 ROOT_DIR = Path('/')
 CONFIG_BASE_DIR = ROOT_DIR / 'config'
 DOWNLOADS_BASE_DIR = ROOT_DIR / 'downloads'

-DJANGO_URL_PREFIX = os.getenv('DJANGO_URL_PREFIX', None)
-STATIC_URL = str(os.getenv('DJANGO_STATIC_URL', '/static/'))
+DJANGO_URL_PREFIX = getenv('DJANGO_URL_PREFIX').strip()
+STATIC_URL = getenv('DJANGO_STATIC_URL', '/static/').strip()
 if DJANGO_URL_PREFIX and STATIC_URL:
     STATIC_URL = urljoin(DJANGO_URL_PREFIX, STATIC_URL[1:])

 # This is not ever meant to be a public web interface so this isn't too critical
-SECRET_KEY = str(os.getenv('DJANGO_SECRET_KEY', 'tubesync-django-secret'))
+SECRET_KEY = getenv('DJANGO_SECRET_KEY', 'tubesync-django-secret')

-ALLOWED_HOSTS_STR = str(os.getenv('TUBESYNC_HOSTS', '*'))
+ALLOWED_HOSTS_STR = getenv('TUBESYNC_HOSTS', '*')
 ALLOWED_HOSTS = ALLOWED_HOSTS_STR.split(',')

-DEBUG = True if os.getenv('TUBESYNC_DEBUG', False) else False
-FORCE_SCRIPT_NAME = os.getenv('DJANGO_FORCE_SCRIPT_NAME', DJANGO_URL_PREFIX)
+DEBUG_STR = getenv('TUBESYNC_DEBUG', False)
+DEBUG = True if 'true' == DEBUG_STR.strip().lower() else False
+FORCE_SCRIPT_NAME = getenv('DJANGO_FORCE_SCRIPT_NAME', DJANGO_URL_PREFIX)

 database_dict = {}
-database_connection_env = os.getenv('DATABASE_CONNECTION', '')
+database_connection_env = getenv('DATABASE_CONNECTION')
 if database_connection_env:
     database_dict = parse_database_connection_string(database_connection_env)

 if database_dict:
-    print(f'Using database connection: {database_dict["ENGINE"]}://'
-          f'{database_dict["USER"]}:[hidden]@{database_dict["HOST"]}:'
-          f'{database_dict["PORT"]}/{database_dict["NAME"]}', file=sys.stdout)
+    print(f'Using database connection: {database_dict["DRIVER"]}://'
+          f'{database_dict["USER"]}:[hidden]@{database_dict["HOST"]}:'
+          f'{database_dict["PORT"]}/{database_dict["NAME"]}',
+          file=sys.stdout, flush=True)
     DATABASES = {
         'default': database_dict,
     }

@@ -60,7 +61,7 @@ else:
     DEFAULT_THREADS = 1
-BACKGROUND_TASK_ASYNC_THREADS = int(os.getenv('TUBESYNC_WORKERS', DEFAULT_THREADS))
+BACKGROUND_TASK_ASYNC_THREADS = getenv('TUBESYNC_WORKERS', DEFAULT_THREADS, integer=True)

 MEDIA_ROOT = CONFIG_BASE_DIR / 'media'

@@ -70,14 +71,14 @@ YOUTUBE_DL_TEMPDIR = DOWNLOAD_ROOT / 'cache'
 COOKIES_FILE = CONFIG_BASE_DIR / 'cookies.txt'

-HEALTHCHECK_FIREWALL_STR = str(os.getenv('TUBESYNC_HEALTHCHECK_FIREWALL', 'True')).strip().lower()
-HEALTHCHECK_FIREWALL = True if HEALTHCHECK_FIREWALL_STR == 'true' else False
-HEALTHCHECK_ALLOWED_IPS_STR = str(os.getenv('TUBESYNC_HEALTHCHECK_ALLOWED_IPS', '127.0.0.1'))
+HEALTHCHECK_FIREWALL_STR = getenv('TUBESYNC_HEALTHCHECK_FIREWALL', True)
+HEALTHCHECK_FIREWALL = ( 'true' == HEALTHCHECK_FIREWALL_STR.strip().lower() )
+HEALTHCHECK_ALLOWED_IPS_STR = getenv('TUBESYNC_HEALTHCHECK_ALLOWED_IPS', '127.0.0.1')
 HEALTHCHECK_ALLOWED_IPS = HEALTHCHECK_ALLOWED_IPS_STR.split(',')

-BASICAUTH_USERNAME = os.getenv('HTTP_USER', '').strip()
-BASICAUTH_PASSWORD = os.getenv('HTTP_PASS', '').strip()
+BASICAUTH_USERNAME = getenv('HTTP_USER').strip()
+BASICAUTH_PASSWORD = getenv('HTTP_PASS').strip()
 if BASICAUTH_USERNAME and BASICAUTH_PASSWORD:
     BASICAUTH_DISABLE = False
     BASICAUTH_USERS = {

@@ -88,25 +89,25 @@ else:
     BASICAUTH_USERS = {}

-SOURCE_DOWNLOAD_DIRECTORY_PREFIX_STR = os.getenv('TUBESYNC_DIRECTORY_PREFIX', 'True').strip().lower()
-SOURCE_DOWNLOAD_DIRECTORY_PREFIX = True if SOURCE_DOWNLOAD_DIRECTORY_PREFIX_STR == 'true' else False
+SOURCE_DOWNLOAD_DIRECTORY_PREFIX_STR = getenv('TUBESYNC_DIRECTORY_PREFIX', True)
+SOURCE_DOWNLOAD_DIRECTORY_PREFIX = ( 'true' == SOURCE_DOWNLOAD_DIRECTORY_PREFIX_STR.strip().lower() )

-SHRINK_NEW_MEDIA_METADATA_STR = os.getenv('TUBESYNC_SHRINK_NEW', 'false').strip().lower()
-SHRINK_NEW_MEDIA_METADATA = ( 'true' == SHRINK_NEW_MEDIA_METADATA_STR )
-SHRINK_OLD_MEDIA_METADATA_STR = os.getenv('TUBESYNC_SHRINK_OLD', 'false').strip().lower()
-SHRINK_OLD_MEDIA_METADATA = ( 'true' == SHRINK_OLD_MEDIA_METADATA_STR )
+SHRINK_NEW_MEDIA_METADATA_STR = getenv('TUBESYNC_SHRINK_NEW', False)
+SHRINK_NEW_MEDIA_METADATA = ( 'true' == SHRINK_NEW_MEDIA_METADATA_STR.strip().lower() )
+SHRINK_OLD_MEDIA_METADATA_STR = getenv('TUBESYNC_SHRINK_OLD', False)
+SHRINK_OLD_MEDIA_METADATA = ( 'true' == SHRINK_OLD_MEDIA_METADATA_STR.strip().lower() )

 # TUBESYNC_RENAME_ALL_SOURCES: True or False
-RENAME_ALL_SOURCES_STR = os.getenv('TUBESYNC_RENAME_ALL_SOURCES', 'False').strip().lower()
-RENAME_ALL_SOURCES = ( 'true' == RENAME_ALL_SOURCES_STR )
+RENAME_ALL_SOURCES_STR = getenv('TUBESYNC_RENAME_ALL_SOURCES', False)
+RENAME_ALL_SOURCES = ( 'true' == RENAME_ALL_SOURCES_STR.strip().lower() )
 # TUBESYNC_RENAME_SOURCES: A comma-separated list of Source directories
-RENAME_SOURCES_STR = os.getenv('TUBESYNC_RENAME_SOURCES', '')
+RENAME_SOURCES_STR = getenv('TUBESYNC_RENAME_SOURCES')
 RENAME_SOURCES = RENAME_SOURCES_STR.split(',') if RENAME_SOURCES_STR else None

-VIDEO_HEIGHT_CUTOFF = int(os.getenv("TUBESYNC_VIDEO_HEIGHT_CUTOFF", "240"))
+VIDEO_HEIGHT_CUTOFF = getenv("TUBESYNC_VIDEO_HEIGHT_CUTOFF", 240, integer=True)

 # ensure that the current directory exists

@@ -117,4 +118,11 @@ old_youtube_cache_dirs = list(YOUTUBE_DL_CACHEDIR.parent.glob('youtube-*'))
 old_youtube_cache_dirs.extend(list(YOUTUBE_DL_CACHEDIR.parent.glob('youtube/youtube-*')))
 for cache_dir in old_youtube_cache_dirs:
     cache_dir.rename(YOUTUBE_DL_CACHEDIR / cache_dir.name)
+# try to remove the old, hopefully empty, directory
+empty_old_youtube_dir = YOUTUBE_DL_CACHEDIR.parent / 'youtube'
+if empty_old_youtube_dir.is_dir():
+    try:
+        empty_old_youtube_dir.rmdir()
+    except:
+        pass

View File

@@ -1,5 +1,5 @@
-import os
 from pathlib import Path
+from common.utils import getenv

 BASE_DIR = Path(__file__).resolve().parent.parent

@@ -97,7 +97,7 @@ AUTH_PASSWORD_VALIDATORS = [
 LANGUAGE_CODE = 'en-us'

-TIME_ZONE = os.getenv('TZ', 'UTC')
+TIME_ZONE = getenv('TZ', 'UTC')
 USE_I18N = True
 USE_L10N = True
 USE_TZ = True