diff --git a/Dockerfile b/Dockerfile index 76bb21b2..023f4fd8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,124 +1,270 @@ -FROM debian:bookworm-slim +# syntax=docker/dockerfile:1 +# check=error=true -ARG TARGETARCH -ARG TARGETPLATFORM +ARG FFMPEG_DATE="2025-01-21-14-19" +ARG FFMPEG_VERSION="N-118328-g504df09c34" ARG S6_VERSION="3.2.0.2" + ARG SHA256_S6_AMD64="59289456ab1761e277bd456a95e737c06b03ede99158beb24f12b165a904f478" ARG SHA256_S6_ARM64="8b22a2eaca4bf0b27a43d36e65c89d2701738f628d1abd0cea5569619f66f785" ARG SHA256_S6_NOARCH="6dbcde158a3e78b9bb141d7bcb5ccb421e563523babbe2c64470e76f4fd02dae" -ARG FFMPEG_DATE="autobuild-2024-12-24-14-15" -ARG FFMPEG_VERSION="N-118163-g954d55c2a4" -ARG SHA256_FFMPEG_AMD64="798a7e5a0724139e6bb70df8921522b23be27028f9f551dfa83c305ec4ffaf3a" -ARG SHA256_FFMPEG_ARM64="c3e6cc0fec42cc7e3804014fbb02c1384a1a31ef13f6f9a36121f2e1216240c0" +ARG ALPINE_VERSION="latest" +ARG DEBIAN_VERSION="bookworm-slim" -ENV S6_VERSION="${S6_VERSION}" \ - FFMPEG_DATE="${FFMPEG_DATE}" \ - FFMPEG_VERSION="${FFMPEG_VERSION}" +ARG FFMPEG_PREFIX_FILE="ffmpeg-${FFMPEG_VERSION}" +ARG FFMPEG_SUFFIX_FILE=".tar.xz" + +ARG FFMPEG_CHECKSUM_ALGORITHM="sha256" +ARG S6_CHECKSUM_ALGORITHM="sha256" + + +FROM alpine:${ALPINE_VERSION} AS ffmpeg-download +ARG FFMPEG_DATE +ARG FFMPEG_VERSION +ARG FFMPEG_PREFIX_FILE +ARG FFMPEG_SUFFIX_FILE +ARG SHA256_FFMPEG_AMD64 +ARG SHA256_FFMPEG_ARM64 +ARG FFMPEG_CHECKSUM_ALGORITHM +ARG CHECKSUM_ALGORITHM="${FFMPEG_CHECKSUM_ALGORITHM}" +ARG FFMPEG_CHECKSUM_AMD64="${SHA256_FFMPEG_AMD64}" +ARG FFMPEG_CHECKSUM_ARM64="${SHA256_FFMPEG_ARM64}" + +ARG FFMPEG_FILE_SUMS="checksums.${CHECKSUM_ALGORITHM}" +ARG FFMPEG_URL="https://github.com/yt-dlp/FFmpeg-Builds/releases/download/autobuild-${FFMPEG_DATE}" + +ARG DESTDIR="/downloaded" +ARG TARGETARCH +ADD "${FFMPEG_URL}/${FFMPEG_FILE_SUMS}" "${DESTDIR}/" +RUN set -eu ; \ + apk --no-cache --no-progress add cmd:aria2c cmd:awk "cmd:${CHECKSUM_ALGORITHM}sum" ; \ +\ + aria2c_options() { \ + algorithm="${CHECKSUM_ALGORITHM%[0-9]??}" ; \ + bytes="${CHECKSUM_ALGORITHM#${algorithm}}" ; \ + hash="$( awk -v fn="${1##*/}" '$0 ~ fn"$" { print $1; exit; }' "${DESTDIR}/${FFMPEG_FILE_SUMS}" )" ; \ +\ + printf -- '\t%s\n' \ + 'allow-overwrite=true' \ + 'always-resume=false' \ + 'check-integrity=true' \ + "checksum=${algorithm}-${bytes}=${hash}" \ + 'max-connection-per-server=2' \ +; \ + printf -- '\n' ; \ + } ; \ +\ + decide_arch() { \ + case "${TARGETARCH}" in \ + (amd64) printf -- 'linux64' ;; \ + (arm64) printf -- 'linuxarm64' ;; \ + esac ; \ + } ; \ +\ + FFMPEG_ARCH="$(decide_arch)" ; \ + FFMPEG_PREFIX_FILE="$( printf -- '%s' "${FFMPEG_PREFIX_FILE}" | cut -d '-' -f 1,2 )" ; \ + for url in $(awk ' \ + $2 ~ /^[*]?'"${FFMPEG_PREFIX_FILE}"'/ && /-'"${FFMPEG_ARCH}"'-/ { $1=""; print; } \ + ' "${DESTDIR}/${FFMPEG_FILE_SUMS}") ; \ + do \ + url="${FFMPEG_URL}/${url# }" ; \ + printf -- '%s\n' "${url}" ; \ + aria2c_options "${url}" ; \ + printf -- '\n' ; \ + done > /tmp/downloads ; \ + unset -v url ; \ +\ + aria2c --no-conf=true \ + --dir /downloaded \ + --lowest-speed-limit='16K' \ + --show-console-readout=false \ + --summary-interval=0 \ + --input-file /tmp/downloads ; \ +\ + decide_expected() { \ + case "${TARGETARCH}" in \ + (amd64) printf -- '%s' "${FFMPEG_CHECKSUM_AMD64}" ;; \ + (arm64) printf -- '%s' "${FFMPEG_CHECKSUM_ARM64}" ;; \ + esac ; \ + } ; \ +\ + FFMPEG_HASH="$(decide_expected)" ; \ +\ + cd "${DESTDIR}" ; \ + if [ -n "${FFMPEG_HASH}" ] ; \ + then \ + printf -- '%s *%s\n' "${FFMPEG_HASH}" 
"${FFMPEG_PREFIX_FILE}"*-"${FFMPEG_ARCH}"-*"${FFMPEG_SUFFIX_FILE}" >> /tmp/SUMS ; \ + "${CHECKSUM_ALGORITHM}sum" --check --warn --strict /tmp/SUMS || exit ; \ + fi ; \ + "${CHECKSUM_ALGORITHM}sum" --check --warn --strict --ignore-missing "${DESTDIR}/${FFMPEG_FILE_SUMS}" ; \ +\ + mkdir -v -p "/verified/${TARGETARCH}" ; \ + ln -v "${FFMPEG_PREFIX_FILE}"*-"${FFMPEG_ARCH}"-*"${FFMPEG_SUFFIX_FILE}" "/verified/${TARGETARCH}/" ; \ + rm -rf "${DESTDIR}" ; + +FROM alpine:${ALPINE_VERSION} AS ffmpeg-extracted +COPY --from=ffmpeg-download /verified /verified + +ARG FFMPEG_PREFIX_FILE +ARG FFMPEG_SUFFIX_FILE +ARG TARGETARCH +RUN set -eux ; \ + mkdir -v /extracted ; \ + cd /extracted ; \ + ln -s "/verified/${TARGETARCH}"/"${FFMPEG_PREFIX_FILE}"*"${FFMPEG_SUFFIX_FILE}" "/tmp/ffmpeg${FFMPEG_SUFFIX_FILE}" ; \ + tar -tf "/tmp/ffmpeg${FFMPEG_SUFFIX_FILE}" | grep '/bin/\(ffmpeg\|ffprobe\)' > /tmp/files ; \ + tar -xop \ + --strip-components=2 \ + -f "/tmp/ffmpeg${FFMPEG_SUFFIX_FILE}" \ + -T /tmp/files ; \ +\ + ls -AlR /extracted ; + +FROM scratch AS ffmpeg +COPY --from=ffmpeg-extracted /extracted /usr/local/bin/ + +FROM alpine:${ALPINE_VERSION} AS s6-overlay-download +ARG S6_VERSION +ARG SHA256_S6_AMD64 +ARG SHA256_S6_ARM64 +ARG SHA256_S6_NOARCH + +ARG DESTDIR="/downloaded" +ARG S6_CHECKSUM_ALGORITHM +ARG CHECKSUM_ALGORITHM="${S6_CHECKSUM_ALGORITHM}" + +ARG S6_CHECKSUM_AMD64="${CHECKSUM_ALGORITHM}:${SHA256_S6_AMD64}" +ARG S6_CHECKSUM_ARM64="${CHECKSUM_ALGORITHM}:${SHA256_S6_ARM64}" +ARG S6_CHECKSUM_NOARCH="${CHECKSUM_ALGORITHM}:${SHA256_S6_NOARCH}" + +ARG S6_OVERLAY_URL="https://github.com/just-containers/s6-overlay/releases/download/v${S6_VERSION}" +ARG S6_PREFIX_FILE="s6-overlay-" +ARG S6_SUFFIX_FILE=".tar.xz" + +ARG S6_FILE_AMD64="${S6_PREFIX_FILE}x86_64${S6_SUFFIX_FILE}" +ARG S6_FILE_ARM64="${S6_PREFIX_FILE}aarch64${S6_SUFFIX_FILE}" +ARG S6_FILE_NOARCH="${S6_PREFIX_FILE}noarch${S6_SUFFIX_FILE}" + +ADD "${S6_OVERLAY_URL}/${S6_FILE_AMD64}.${CHECKSUM_ALGORITHM}" "${DESTDIR}/" +ADD "${S6_OVERLAY_URL}/${S6_FILE_ARM64}.${CHECKSUM_ALGORITHM}" "${DESTDIR}/" +ADD "${S6_OVERLAY_URL}/${S6_FILE_NOARCH}.${CHECKSUM_ALGORITHM}" "${DESTDIR}/" + +##ADD --checksum="${S6_CHECKSUM_AMD64}" "${S6_OVERLAY_URL}/${S6_FILE_AMD64}" "${DESTDIR}/" +##ADD --checksum="${S6_CHECKSUM_ARM64}" "${S6_OVERLAY_URL}/${S6_FILE_ARM64}" "${DESTDIR}/" +##ADD --checksum="${S6_CHECKSUM_NOARCH}" "${S6_OVERLAY_URL}/${S6_FILE_NOARCH}" "${DESTDIR}/" + +# --checksum wasn't recognized, so use busybox to check the sums instead +ADD "${S6_OVERLAY_URL}/${S6_FILE_AMD64}" "${DESTDIR}/" +RUN set -eu ; checksum="${S6_CHECKSUM_AMD64}" ; file="${S6_FILE_AMD64}" ; cd "${DESTDIR}/" && \ + printf -- '%s *%s\n' "$(printf -- '%s' "${checksum}" | cut -d : -f 2-)" "${file}" | "${CHECKSUM_ALGORITHM}sum" -cw + +ADD "${S6_OVERLAY_URL}/${S6_FILE_ARM64}" "${DESTDIR}/" +RUN set -eu ; checksum="${S6_CHECKSUM_ARM64}" ; file="${S6_FILE_ARM64}" ; cd "${DESTDIR}/" && \ + printf -- '%s *%s\n' "$(printf -- '%s' "${checksum}" | cut -d : -f 2-)" "${file}" | "${CHECKSUM_ALGORITHM}sum" -cw + +ADD "${S6_OVERLAY_URL}/${S6_FILE_NOARCH}" "${DESTDIR}/" +RUN set -eu ; checksum="${S6_CHECKSUM_NOARCH}" ; file="${S6_FILE_NOARCH}" ; cd "${DESTDIR}/" && \ + printf -- '%s *%s\n' "$(printf -- '%s' "${checksum}" | cut -d : -f 2-)" "${file}" | "${CHECKSUM_ALGORITHM}sum" -cw + +FROM alpine:${ALPINE_VERSION} AS s6-overlay-extracted +COPY --from=s6-overlay-download /downloaded /downloaded + +ARG S6_CHECKSUM_ALGORITHM +ARG CHECKSUM_ALGORITHM="${S6_CHECKSUM_ALGORITHM}" + +ARG TARGETARCH + +RUN set -eu ; \ 
+\ + decide_arch() { \ + local arg1 ; \ + arg1="${1:-$(uname -m)}" ; \ +\ + case "${arg1}" in \ + (amd64) printf -- 'x86_64' ;; \ + (arm64) printf -- 'aarch64' ;; \ + (armv7l) printf -- 'arm' ;; \ + (*) printf -- '%s' "${arg1}" ;; \ + esac ; \ + unset -v arg1 ; \ + } ; \ +\ + apk --no-cache --no-progress add "cmd:${CHECKSUM_ALGORITHM}sum" ; \ + mkdir -v /verified ; \ + cd /downloaded ; \ + for f in *.sha256 ; \ + do \ + "${CHECKSUM_ALGORITHM}sum" --check --warn --strict "${f}" || exit ; \ + ln -v "${f%.sha256}" /verified/ || exit ; \ + done ; \ + unset -v f ; \ +\ + S6_ARCH="$(decide_arch "${TARGETARCH}")" ; \ + set -x ; \ + mkdir -v /s6-overlay-rootfs ; \ + cd /s6-overlay-rootfs ; \ + for f in /verified/*.tar* ; \ + do \ + case "${f}" in \ + (*-noarch.tar*|*-"${S6_ARCH}".tar*) \ + tar -xpf "${f}" || exit ;; \ + esac ; \ + done ; \ + set +x ; \ + unset -v f ; + +FROM scratch AS s6-overlay +COPY --from=s6-overlay-extracted /s6-overlay-rootfs / + +FROM debian:${DEBIAN_VERSION} AS tubesync + +ARG S6_VERSION + +ARG FFMPEG_DATE +ARG FFMPEG_VERSION ENV DEBIAN_FRONTEND="noninteractive" \ - HOME="/root" \ - LANGUAGE="en_US.UTF-8" \ - LANG="en_US.UTF-8" \ - LC_ALL="en_US.UTF-8" \ - TERM="xterm" \ - S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" + HOME="/root" \ + LANGUAGE="en_US.UTF-8" \ + LANG="en_US.UTF-8" \ + LC_ALL="en_US.UTF-8" \ + TERM="xterm" \ + # Do not include compiled byte-code + PIP_NO_COMPILE=1 \ + PIP_ROOT_USER_ACTION='ignore' \ + S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" + +ENV S6_VERSION="${S6_VERSION}" \ + FFMPEG_DATE="${FFMPEG_DATE}" \ + FFMPEG_VERSION="${FFMPEG_VERSION}" # Install third party software +COPY --from=s6-overlay / / +COPY --from=ffmpeg /usr/local/bin/ /usr/local/bin/ + # Reminder: the SHELL handles all variables -RUN decide_arch() { \ - case "${TARGETARCH:=amd64}" in \ - (arm64) printf -- 'aarch64' ;; \ - (*) printf -- '%s' "${TARGETARCH}" ;; \ - esac ; \ - } && \ - decide_expected() { \ - case "${1}" in \ - (ffmpeg) case "${2}" in \ - (amd64) printf -- '%s' "${SHA256_FFMPEG_AMD64}" ;; \ - (arm64) printf -- '%s' "${SHA256_FFMPEG_ARM64}" ;; \ - esac ;; \ - (s6) case "${2}" in \ - (amd64) printf -- '%s' "${SHA256_S6_AMD64}" ;; \ - (arm64) printf -- '%s' "${SHA256_S6_ARM64}" ;; \ - (noarch) printf -- '%s' "${SHA256_S6_NOARCH}" ;; \ - esac ;; \ - esac ; \ - } && \ - decide_url() { \ - case "${1}" in \ - (ffmpeg) printf -- \ - 'https://github.com/yt-dlp/FFmpeg-Builds/releases/download/%s/ffmpeg-%s-linux%s-gpl%s.tar.xz' \ - "${FFMPEG_DATE}" \ - "${FFMPEG_VERSION}" \ - "$(case "${2}" in \ - (amd64) printf -- '64' ;; \ - (*) printf -- '%s' "${2}" ;; \ - esac)" \ - "$(case "${FFMPEG_VERSION%%-*}" in \ - (n*) printf -- '-%s\n' "${FFMPEG_VERSION#n}" | cut -d '-' -f 1,2 ;; \ - (*) printf -- '' ;; \ - esac)" ;; \ - (s6) printf -- \ - 'https://github.com/just-containers/s6-overlay/releases/download/v%s/s6-overlay-%s.tar.xz' \ - "${S6_VERSION}" \ - "$(case "${2}" in \ - (amd64) printf -- 'x86_64' ;; \ - (arm64) printf -- 'aarch64' ;; \ - (*) printf -- '%s' "${2}" ;; \ - esac)" ;; \ - esac ; \ - } && \ - verify_download() { \ - while [ $# -ge 2 ] ; do \ - sha256sum "${2}" ; \ - printf -- '%s %s\n' "${1}" "${2}" | sha256sum -c || return ; \ - shift ; shift ; \ - done ; \ - } && \ - download_expected_file() { \ - local arg1 expected file url ; \ - arg1="$(printf -- '%s\n' "${1}" | awk '{print toupper($0);}')" ; \ - expected="$(decide_expected "${1}" "${2}")" ; \ - file="${3}" ; \ - url="$(decide_url "${1}" "${2}")" ; \ - printf -- '%s\n' \ - "Building for arch: ${2}|${ARCH}, downloading ${arg1} 
from: ${url}, expecting ${arg1} SHA256: ${expected}" && \ - rm -rf "${file}" && \ - curl --disable --output "${file}" --clobber --location --no-progress-meter --url "${url}" && \ - verify_download "${expected}" "${file}" ; \ - } && \ - export ARCH="$(decide_arch)" && \ +RUN --mount=type=cache,id=apt-lib-cache,sharing=locked,target=/var/lib/apt \ + --mount=type=cache,id=apt-cache-cache,sharing=locked,target=/var/cache/apt \ set -x && \ + # Update from the network and keep cache + rm -f /etc/apt/apt.conf.d/docker-clean && \ apt-get update && \ + # Install locales apt-get -y --no-install-recommends install locales && \ printf -- "en_US.UTF-8 UTF-8\n" > /etc/locale.gen && \ locale-gen en_US.UTF-8 && \ - # Install required distro packages - apt-get -y --no-install-recommends install curl ca-certificates file binutils xz-utils && \ - # Install s6 - _file="/tmp/s6-overlay-noarch.tar.xz" && \ - download_expected_file s6 noarch "${_file}" && \ - tar -C / -xpf "${_file}" && rm -f "${_file}" && \ - _file="/tmp/s6-overlay-${ARCH}.tar.xz" && \ - download_expected_file s6 "${TARGETARCH}" "${_file}" && \ - tar -C / -xpf "${_file}" && rm -f "${_file}" && \ + # Install file + apt-get -y --no-install-recommends install file && \ + # Installed s6 (using COPY earlier) file -L /command/s6-overlay-suexec && \ - # Install ffmpeg - _file="/tmp/ffmpeg-${ARCH}.tar.xz" && \ - download_expected_file ffmpeg "${TARGETARCH}" "${_file}" && \ - tar -xvvpf "${_file}" --strip-components=2 --no-anchored -C /usr/local/bin/ "ffmpeg" "ffprobe" && rm -f "${_file}" && \ + # Installed ffmpeg (using COPY earlier) + /usr/local/bin/ffmpeg -version && \ file /usr/local/bin/ff* && \ - # Clean up - apt-get -y autoremove --purge curl file binutils xz-utils && \ - rm -rf /var/lib/apt/lists/* && \ - rm -rf /var/cache/apt/* && \ - rm -rf /tmp/* - -# Install dependencies we keep -RUN set -x && \ - apt-get update && \ + # Clean up file + apt-get -y autoremove --purge file && \ + # Install dependencies we keep # Install required distro packages apt-get -y --no-install-recommends install \ libjpeg62-turbo \ @@ -131,27 +277,29 @@ RUN set -x && \ python3 \ python3-wheel \ redis-server \ - && apt-get -y autoclean && \ - rm -rf /var/lib/apt/lists/* && \ - rm -rf /var/cache/apt/* && \ + curl \ + less \ + && \ + # Clean up + apt-get -y autopurge && \ + apt-get -y autoclean && \ rm -rf /tmp/* # Copy over pip.conf to use piwheels COPY pip.conf /etc/pip.conf -# Add Pipfile -COPY Pipfile /app/Pipfile - -# Do not include compiled byte-code -ENV PIP_NO_COMPILE=1 \ - PIP_NO_CACHE_DIR=1 \ - PIP_ROOT_USER_ACTION='ignore' - # Switch workdir to the the app WORKDIR /app # Set up the app -RUN set -x && \ +RUN --mount=type=tmpfs,target=/cache \ + --mount=type=cache,id=pipenv-cache,sharing=locked,target=/cache/pipenv \ + --mount=type=cache,id=apt-lib-cache,sharing=locked,target=/var/lib/apt \ + --mount=type=cache,id=apt-cache-cache,sharing=locked,target=/var/cache/apt \ + --mount=type=bind,source=Pipfile,target=/app/Pipfile \ + set -x && \ + # Update from the network and keep cache + rm -f /etc/apt/apt.conf.d/docker-clean && \ apt-get update && \ # Install required build packages apt-get -y --no-install-recommends install \ @@ -172,10 +320,11 @@ RUN set -x && \ useradd -M -d /app -s /bin/false -g app app && \ # Install non-distro packages cp -at /tmp/ "${HOME}" && \ - PIPENV_VERBOSITY=64 HOME="/tmp/${HOME#/}" pipenv install --system --skip-lock && \ + HOME="/tmp/${HOME#/}" \ + XDG_CACHE_HOME='/cache' \ + PIPENV_VERBOSITY=64 \ + pipenv install --system --skip-lock 
&& \
    # Clean up
-    rm /app/Pipfile && \
-    pipenv --clear && \
     apt-get -y autoremove --purge \
     default-libmysqlclient-dev \
     g++ \
@@ -189,12 +338,9 @@ RUN set -x && \
     python3-pip \
     zlib1g-dev \
     && \
-    apt-get -y autoremove && \
+    apt-get -y autopurge && \
     apt-get -y autoclean && \
-    rm -rf /var/lib/apt/lists/* && \
-    rm -rf /var/cache/apt/* && \
-    rm -rf /tmp/*
-
+    rm -v -rf /tmp/*
 
 # Copy app
 COPY tubesync /app
@@ -212,24 +358,21 @@ RUN set -x && \
     mkdir -v -p /config/media && \
     mkdir -v -p /config/cache/pycache && \
     mkdir -v -p /downloads/audio && \
-    mkdir -v -p /downloads/video
-
-
-# Append software versions
-RUN set -x && \
-    /usr/local/bin/ffmpeg -version && \
-    FFMPEG_VERSION=$(/usr/local/bin/ffmpeg -version | awk -v 'ev=31' '1 == NR && "ffmpeg" == $1 { print $3; ev=0; } END { exit ev; }') && \
-    test -n "${FFMPEG_VERSION}" && \
-    printf -- "ffmpeg_version = '%s'\n" "${FFMPEG_VERSION}" >> /app/common/third_party_versions.py
+    mkdir -v -p /downloads/video && \
+    # Append software versions
+    ffmpeg_version=$(/usr/local/bin/ffmpeg -version | awk -v 'ev=31' '1 == NR && "ffmpeg" == $1 { print $3; ev=0; } END { exit ev; }') && \
+    test -n "${ffmpeg_version}" && \
+    printf -- "ffmpeg_version = '%s'\n" "${ffmpeg_version}" >> /app/common/third_party_versions.py
 
 # Copy root
 COPY config/root /
 
 # Create a healthcheck
-HEALTHCHECK --interval=1m --timeout=10s CMD /app/healthcheck.py http://127.0.0.1:8080/healthcheck
+HEALTHCHECK --interval=1m --timeout=10s --start-period=3m CMD ["/app/healthcheck.py", "http://127.0.0.1:8080/healthcheck"]
 
 # ENVS and ports
-ENV PYTHONPATH="/app" PYTHONPYCACHEPREFIX="/config/cache/pycache"
+ENV PYTHONPATH="/app" \
+    PYTHONPYCACHEPREFIX="/config/cache/pycache"
 EXPOSE 4848
 
 # Volumes
diff --git a/README.md b/README.md
index a01f9830..af3cd910 100644
--- a/README.md
+++ b/README.md
@@ -138,6 +138,11 @@ services:
       - PGID=1000
 ```
 
+> [!IMPORTANT]
+> If the `/downloads` directory is mounted from a [Samba volume](https://docs.docker.com/engine/storage/volumes/#create-cifssamba-volumes), be sure to also supply the `uid` and `gid` mount parameters in the driver options.
+> These must match the `PUID` and `PGID` values specified as environment variables.
+>
+> Matching these user and group ID numbers prevents issues when executing file actions, such as writing metadata. See [this issue](https://github.com/meeb/tubesync/issues/616#issuecomment-2593458282) for details.
 
 ## Optional authentication
 
@@ -320,7 +325,7 @@ Notable libraries and software used:
 * [django-sass](https://github.com/coderedcorp/django-sass/)
 * The container bundles with `s6-init` and `nginx`
 
-See the [Pipefile](https://github.com/meeb/tubesync/blob/main/Pipfile) for a full list.
+See the [Pipfile](https://github.com/meeb/tubesync/blob/main/Pipfile) for a full list.
 
 ### Can I get access to the full Django admin?
 
@@ -348,7 +353,11 @@ etc.). Configuration of this is beyond the scope of this README.
 
 ### What architectures does the container support?
 
-Just `amd64` for the moment. Others may be made available if there is demand.
+Only two are supported for the moment:
+- `amd64` (most desktop PCs and servers)
+- `arm64` (modern ARM computers, such as the Raspberry Pi 3 or later)
+
+Others may be made available if there is demand.
 
 ### The pipenv install fails with "Locking failed"!
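For illustration, a CIFS volume matching the Samba guidance added to the README above might be created as follows. This is a hedged sketch, not part of the patch: the share path, credentials, and volume name are placeholder assumptions; the point is only that the `uid`/`gid` driver options match the `PUID`/`PGID` values (1000 in the compose example).

```bash
# Sketch only: //fileserver/downloads, smbuser and smbpass are placeholders.
# The uid/gid options must match the PUID/PGID environment variables
# passed to the tubesync container (1000/1000 here).
docker volume create \
  --driver local \
  --opt type=cifs \
  --opt device=//fileserver/downloads \
  --opt o=username=smbuser,password=smbpass,uid=1000,gid=1000 \
  tubesync-downloads
```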
diff --git a/config/root/etc/nginx/nginx.conf b/config/root/etc/nginx/nginx.conf index 14c5aea9..f09c02e1 100644 --- a/config/root/etc/nginx/nginx.conf +++ b/config/root/etc/nginx/nginx.conf @@ -2,6 +2,7 @@ daemon off; user app; worker_processes auto; +worker_cpu_affinity auto; pid /run/nginx.pid; events { diff --git a/config/root/etc/s6-overlay/s6-rc.d/nginx/run b/config/root/etc/s6-overlay/s6-rc.d/nginx/run index 6981f2e9..87769e62 100755 --- a/config/root/etc/s6-overlay/s6-rc.d/nginx/run +++ b/config/root/etc/s6-overlay/s6-rc.d/nginx/run @@ -2,4 +2,4 @@ cd / -/usr/sbin/nginx +exec /usr/sbin/nginx diff --git a/tubesync/healthcheck.py b/tubesync/healthcheck.py index 840da640..5bc127b0 100755 --- a/tubesync/healthcheck.py +++ b/tubesync/healthcheck.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/python3 ''' Perform an HTTP request to a URL and exit with an exit code of 1 if the diff --git a/tubesync/sync/models.py b/tubesync/sync/models.py index fc8a33b6..18edccfd 100644 --- a/tubesync/sync/models.py +++ b/tubesync/sync/models.py @@ -20,8 +20,8 @@ from common.utils import clean_filename, clean_emoji from .youtube import (get_media_info as get_youtube_media_info, download_media as download_youtube_media, get_channel_image_info as get_youtube_channel_image_info) -from .utils import (seconds_to_timestr, parse_media_format, write_text_file, - mkdir_p, directory_and_stem, glob_quote) +from .utils import (seconds_to_timestr, parse_media_format, filter_response, + write_text_file, mkdir_p, directory_and_stem, glob_quote) from .matching import (get_best_combined_format, get_best_audio_format, get_best_video_format) from .mediaservers import PlexMediaServer @@ -591,6 +591,7 @@ class Source(models.Model): 'key': 'SoMeUnIqUiD', 'format': '-'.join(fmt), 'playlist_title': 'Some Playlist Title', + 'video_order': '01', 'ext': self.extension, 'resolution': self.source_resolution if self.source_resolution else '', 'height': '720' if self.source_resolution else '', @@ -1130,6 +1131,7 @@ class Media(models.Model): 'key': self.key, 'format': '-'.join(display_format['format']), 'playlist_title': self.playlist_title, + 'video_order': self.get_episode_str(True), 'ext': self.source.extension, 'resolution': display_format['resolution'], 'height': display_format['height'], @@ -1145,8 +1147,39 @@ class Media(models.Model): def has_metadata(self): return self.metadata is not None + + @property + def reduce_data(self): + try: + from common.logger import log + from common.utils import json_serial + + old_mdl = len(self.metadata or "") + data = json.loads(self.metadata or "") + compact_json = json.dumps(data, separators=(',', ':'), default=json_serial) + + filtered_data = filter_response(data, True) + filtered_json = json.dumps(filtered_data, separators=(',', ':'), default=json_serial) + except Exception as e: + log.exception('reduce_data: %s', e) + else: + # log the results of filtering / compacting on metadata size + new_mdl = len(compact_json) + if old_mdl > new_mdl: + delta = old_mdl - new_mdl + log.info(f'{self.key}: metadata compacted by {delta:,} characters ({old_mdl:,} -> {new_mdl:,})') + new_mdl = len(filtered_json) + if old_mdl > new_mdl: + delta = old_mdl - new_mdl + log.info(f'{self.key}: metadata reduced by {delta:,} characters ({old_mdl:,} -> {new_mdl:,})') + if getattr(settings, 'SHRINK_OLD_MEDIA_METADATA', False): + self.metadata = filtered_json + + @property def loaded_metadata(self): + if getattr(settings, 'SHRINK_OLD_MEDIA_METADATA', False): + self.reduce_data try: data = 
json.loads(self.metadata)
             if not isinstance(data, dict):
@@ -1265,8 +1298,7 @@ class Media(models.Model):
 
     @property
     def directory_path(self):
-        dirname = self.source.directory_path / self.filename
-        return dirname.parent
+        return self.filepath.parent
 
     @property
     def filepath(self):
@@ -1375,8 +1407,7 @@ class Media(models.Model):
             nfo.append(season)
         # episode = number of video in the year
         episode = nfo.makeelement('episode', {})
-        episode_number = self.calculate_episode_number()
-        episode.text = str(episode_number) if episode_number else ''
+        episode.text = self.get_episode_str()
         episode.tail = '\n  '
         nfo.append(episode)
         # ratings = media metadata youtube rating
@@ -1389,7 +1420,7 @@ class Media(models.Model):
         rating_attrs = OrderedDict()
         rating_attrs['name'] = 'youtube'
         rating_attrs['max'] = '5'
-        rating_attrs['default'] = 'True'
+        rating_attrs['default'] = 'true'
         rating = nfo.makeelement('rating', rating_attrs)
         rating.text = '\n '
         rating.append(value)
@@ -1397,7 +1428,8 @@ class Media(models.Model):
         rating.tail = '\n '
         ratings = nfo.makeelement('ratings', {})
         ratings.text = '\n '
-        ratings.append(rating)
+        if self.rating is not None:
+            ratings.append(rating)
         ratings.tail = '\n '
         nfo.append(ratings)
         # plot = media metadata description
@@ -1414,7 +1446,8 @@ class Media(models.Model):
         mpaa = nfo.makeelement('mpaa', {})
         mpaa.text = str(self.age_limit)
         mpaa.tail = '\n '
-        nfo.append(mpaa)
+        if self.age_limit and self.age_limit > 0:
+            nfo.append(mpaa)
         # runtime = media metadata duration in seconds
         runtime = nfo.makeelement('runtime', {})
         runtime.text = str(self.duration)
@@ -1526,6 +1559,16 @@ class Media(models.Model):
                 return position_counter
             position_counter += 1
 
+    def get_episode_str(self, use_padding=False):
+        episode_number = self.calculate_episode_number()
+        if not episode_number:
+            return ''
+
+        if use_padding:
+            return f'{episode_number:02}'
+
+        return str(episode_number)
+
     def rename_files(self):
         if self.downloaded and self.media_file:
             old_video_path = Path(self.media_file.path)
diff --git a/tubesync/sync/tasks.py b/tubesync/sync/tasks.py
index 988ea8fb..84dc28b7 100644
--- a/tubesync/sync/tasks.py
+++ b/tubesync/sync/tasks.py
@@ -26,7 +26,7 @@ from common.errors import NoMediaException, DownloadFailedException
 from common.utils import json_serial
 from .models import Source, Media, MediaServer
 from .utils import (get_remote_image, resize_image_to_height, delete_file,
-                    write_text_file)
+                    write_text_file, filter_response)
 from .filtering import filter_media
 
 
@@ -305,7 +305,10 @@ def download_media_metadata(media_id):
         return
     source = media.source
     metadata = media.index_metadata()
-    media.metadata = json.dumps(metadata, default=json_serial)
+    response = metadata
+    if getattr(settings, 'SHRINK_NEW_MEDIA_METADATA', False):
+        response = filter_response(metadata, True)
+    media.metadata = json.dumps(response, separators=(',', ':'), default=json_serial)
     upload_date = media.upload_date
     # Media must have a valid upload date
     if upload_date:
@@ -447,7 +450,10 @@ def download_media(media_id):
         # If selected, write an NFO file
         if media.source.write_nfo:
             log.info(f'Writing media NFO file to: {media.nfopath}')
-            write_text_file(media.nfopath, media.nfoxml)
+            try:
+                write_text_file(media.nfopath, media.nfoxml)
+            except PermissionError as e:
+                log.warning(f'A permissions problem occurred when writing the new media NFO file: {e}')
         # Schedule a task to update media servers
         for mediaserver in MediaServer.objects.all():
             log.info(f'Scheduling media server updates')
diff --git a/tubesync/sync/templates/sync/_mediaformatvars.html b/tubesync/sync/templates/sync/_mediaformatvars.html
index 438b200a..06068f90 100644
--- a/tubesync/sync/templates/sync/_mediaformatvars.html
+++ b/tubesync/sync/templates/sync/_mediaformatvars.html
@@ -73,6 +73,11 @@
       Playlist title of media, if it's in a playlist
       Some Playlist
 
+
+      {video_order}
+      Episode order in the playlist, if the media is in a playlist (can cause issues if the playlist is changed after adding)
+      01
+
 
       {ext}
       File extension
diff --git a/tubesync/sync/templates/sync/dashboard.html b/tubesync/sync/templates/sync/dashboard.html
index 8c27684c..ccf4a6c3 100644
--- a/tubesync/sync/templates/sync/dashboard.html
+++ b/tubesync/sync/templates/sync/dashboard.html
@@ -125,7 +125,7 @@
       Database
-      Database
{{ database_connection }} + Database
{{ database_connection }}{% if database_filesize %} {{ database_filesize|filesizeformat }}{% endif %} diff --git a/tubesync/sync/templates/sync/media-item.html b/tubesync/sync/templates/sync/media-item.html index 6f751be6..026e5a54 100644 --- a/tubesync/sync/templates/sync/media-item.html +++ b/tubesync/sync/templates/sync/media-item.html @@ -146,7 +146,7 @@
ID: {{ format.format_id }} {% if format.vcodec|lower != 'none' %}, {{ format.format_note }} ({{ format.width }}x{{ format.height }}), fps:{{ format.fps|lower }}, video:{{ format.vcodec }} @{{ format.tbr }}k{% endif %} - {% if format.acodec|lower != 'none' %}, audio:{{ format.acodec }} @{{ format.abr }}k / {{ format.asr }}Hz{% endif %} + {% if format.acodec|lower != 'none' %}, audio:{{ format.acodec }} @{{ format.abr }}k / {{ format.asr }}Hz {{ format.format_note }}{% endif %} {% if format.format_id == combined_format or format.format_id == audio_format or format.format_id == video_format %}(matched){% endif %}
{% empty %} diff --git a/tubesync/sync/tests.py b/tubesync/sync/tests.py index 8f0de6ef..2704058f 100644 --- a/tubesync/sync/tests.py +++ b/tubesync/sync/tests.py @@ -18,6 +18,7 @@ from background_task.models import Task from .models import Source, Media from .tasks import cleanup_old_media from .filtering import filter_media +from .utils import filter_response class FrontEndTestCase(TestCase): @@ -1709,6 +1710,84 @@ class FormatMatchingTestCase(TestCase): f'expected {expected_match_result}') +class ResponseFilteringTestCase(TestCase): + + def setUp(self): + # Disable general logging for test case + logging.disable(logging.CRITICAL) + # Add a test source + self.source = Source.objects.create( + source_type=Source.SOURCE_TYPE_YOUTUBE_CHANNEL, + key='testkey', + name='testname', + directory='testdirectory', + index_schedule=3600, + delete_old_media=False, + days_to_keep=14, + source_resolution=Source.SOURCE_RESOLUTION_1080P, + source_vcodec=Source.SOURCE_VCODEC_VP9, + source_acodec=Source.SOURCE_ACODEC_OPUS, + prefer_60fps=False, + prefer_hdr=False, + fallback=Source.FALLBACK_FAIL + ) + # Add some media + self.media = Media.objects.create( + key='mediakey', + source=self.source, + metadata='{}' + ) + + def test_metadata_20230629(self): + self.media.metadata = all_test_metadata['20230629'] + self.media.save() + + unfiltered = self.media.loaded_metadata + filtered = filter_response(self.media.loaded_metadata) + self.assertIn('formats', unfiltered.keys()) + self.assertIn('formats', filtered.keys()) + # filtered 'downloader_options' + self.assertIn('downloader_options', unfiltered['formats'][10].keys()) + self.assertNotIn('downloader_options', filtered['formats'][10].keys()) + # filtered 'http_headers' + self.assertIn('http_headers', unfiltered['formats'][0].keys()) + self.assertNotIn('http_headers', filtered['formats'][0].keys()) + # did not lose any formats + self.assertEqual(48, len(unfiltered['formats'])) + self.assertEqual(48, len(filtered['formats'])) + self.assertEqual(len(unfiltered['formats']), len(filtered['formats'])) + # did not remove everything with url + self.assertIn('original_url', unfiltered.keys()) + self.assertIn('original_url', filtered.keys()) + self.assertEqual(unfiltered['original_url'], filtered['original_url']) + # did reduce the size of the metadata + self.assertTrue(len(str(filtered)) < len(str(unfiltered))) + + url_keys = [] + for format in unfiltered['formats']: + for key in format.keys(): + if 'url' in key: + url_keys.append((format['format_id'], key, format[key],)) + unfiltered_url_keys = url_keys + self.assertEqual(63, len(unfiltered_url_keys), msg=str(unfiltered_url_keys)) + + url_keys = [] + for format in filtered['formats']: + for key in format.keys(): + if 'url' in key: + url_keys.append((format['format_id'], key, format[key],)) + filtered_url_keys = url_keys + self.assertEqual(3, len(filtered_url_keys), msg=str(filtered_url_keys)) + + url_keys = [] + for lang_code, captions in filtered['automatic_captions'].items(): + for caption in captions: + for key in caption.keys(): + if 'url' in key: + url_keys.append((lang_code, caption['ext'], caption[key],)) + self.assertEqual(0, len(url_keys), msg=str(url_keys)) + + class TasksTestCase(TestCase): def setUp(self): diff --git a/tubesync/sync/utils.py b/tubesync/sync/utils.py index 00fde969..71a872bc 100644 --- a/tubesync/sync/utils.py +++ b/tubesync/sync/utils.py @@ -1,6 +1,7 @@ import os import re import math +from copy import deepcopy from operator import itemgetter from pathlib import Path from tempfile import 
NamedTemporaryFile @@ -154,7 +155,8 @@ def write_text_file(filepath, filedata): bytes_written = f.write(filedata) # chmod a+r temp_file old_mode = new_filepath.stat().st_mode - new_filepath.chmod(0o444 | old_mode) + if 0o444 != (0o444 & old_mode): + new_filepath.chmod(0o444 | old_mode) if not file_is_editable(new_filepath): new_filepath.unlink() raise ValueError(f'File cannot be edited or removed: {filepath}') @@ -201,6 +203,95 @@ def normalize_codec(codec_str): return result +def _url_keys(arg_dict, filter_func): + result = {} + for key in arg_dict.keys(): + if 'url' in key: + result.update( + {key: filter_func(key=key, url=arg_dict[key])} + ) + return result + + +def _drop_url_keys(arg_dict, key, filter_func): + if key in arg_dict.keys(): + for val_dict in arg_dict[key]: + for url_key, remove in _url_keys(val_dict, filter_func).items(): + if remove is True: + del val_dict[url_key] + + +def filter_response(arg_dict, copy_arg=False): + ''' + Clean up the response so as to not store useless metadata in the database. + ''' + response_dict = arg_dict + # raise an exception for an unexpected argument type + if not isinstance(response_dict, dict): + raise TypeError(f'response_dict must be a dict, got "{type(response_dict)}"') + + if copy_arg: + response_dict = deepcopy(arg_dict) + + # optimize the empty case + if not response_dict: + return response_dict + + # beginning of formats cleanup {{{ + # drop urls that expire, or restrict IPs + def drop_format_url(**kwargs): + url = kwargs['url'] + return ( + url + and '://' in url + and ( + '/ip/' in url + or 'ip=' in url + or '/expire/' in url + or 'expire=' in url + ) + ) + + # these format keys are not useful to us + drop_keys = frozenset(( + 'downloader_options', + 'fragments', + 'http_headers', + '__needs_testing', + '__working', + )) + for key in frozenset(('formats', 'requested_formats',)): + _drop_url_keys(response_dict, key, drop_format_url) + if key in response_dict.keys(): + for format in response_dict[key]: + for drop_key in drop_keys: + if drop_key in format.keys(): + del format[drop_key] + # end of formats cleanup }}} + + # beginning of subtitles cleanup {{{ + # drop urls that expire + def drop_subtitles_url(**kwargs): + url = kwargs['url'] + return ( + url + and '://' in url + and ( + '/expire/' in url + or '&expire=' in url + ) + ) + + for key in frozenset(('subtitles', 'automatic_captions',)): + if key in response_dict.keys(): + key_dict = response_dict[key] + for lang_code in key_dict: + _drop_url_keys(key_dict, lang_code, drop_subtitles_url) + # end of subtitles cleanup }}} + + return response_dict + + def parse_media_format(format_dict): ''' This parser primarily adapts the format dict returned by youtube-dl into a @@ -244,6 +335,7 @@ def parse_media_format(format_dict): return { 'id': format_dict.get('format_id', ''), 'format': format_str, + 'format_note': format_dict.get('format_note', ''), 'format_verbose': format_dict.get('format', ''), 'height': height, 'width': width, diff --git a/tubesync/sync/views.py b/tubesync/sync/views.py index 52090042..43a51b91 100644 --- a/tubesync/sync/views.py +++ b/tubesync/sync/views.py @@ -14,7 +14,7 @@ from django.views.generic.detail import SingleObjectMixin from django.core.exceptions import SuspiciousFileOperation from django.http import HttpResponse from django.urls import reverse_lazy -from django.db import IntegrityError +from django.db import connection, IntegrityError from django.db.models import Q, Count, Sum, When, Case from django.forms import Form, ValidationError from 
django.utils.text import slugify @@ -85,6 +85,12 @@ class DashboardView(TemplateView): data['config_dir'] = str(settings.CONFIG_BASE_DIR) data['downloads_dir'] = str(settings.DOWNLOAD_ROOT) data['database_connection'] = settings.DATABASE_CONNECTION_STR + # Add the database filesize when using db.sqlite3 + data['database_filesize'] = None + db_name = str(connection.get_connection_params()['database']) + db_path = pathlib.Path(db_name) if '/' == db_name[0] else None + if db_path and 'sqlite' == connection.vendor: + data['database_filesize'] = db_path.stat().st_size return data @@ -193,10 +199,15 @@ class ValidateSourceView(FormView): Source.SOURCE_TYPE_YOUTUBE_PLAYLIST: ('https://www.youtube.com/playlist?list=' 'PL590L5WQmH8dpP0RyH5pCfIaDEdt9nk7r') } + _youtube_domains = frozenset({ + 'youtube.com', + 'm.youtube.com', + 'www.youtube.com', + }) validation_urls = { Source.SOURCE_TYPE_YOUTUBE_CHANNEL: { 'scheme': 'https', - 'domains': ('m.youtube.com', 'www.youtube.com'), + 'domains': _youtube_domains, 'path_regex': '^\/(c\/)?([^\/]+)(\/videos)?$', 'path_must_not_match': ('/playlist', '/c/playlist'), 'qs_args': [], @@ -205,7 +216,7 @@ class ValidateSourceView(FormView): }, Source.SOURCE_TYPE_YOUTUBE_CHANNEL_ID: { 'scheme': 'https', - 'domains': ('m.youtube.com', 'www.youtube.com'), + 'domains': _youtube_domains, 'path_regex': '^\/channel\/([^\/]+)(\/videos)?$', 'path_must_not_match': ('/playlist', '/c/playlist'), 'qs_args': [], @@ -214,7 +225,7 @@ class ValidateSourceView(FormView): }, Source.SOURCE_TYPE_YOUTUBE_PLAYLIST: { 'scheme': 'https', - 'domains': ('m.youtube.com', 'www.youtube.com'), + 'domains': _youtube_domains, 'path_regex': '^\/(playlist|watch)$', 'path_must_not_match': (), 'qs_args': ('list',), @@ -286,11 +297,36 @@ class ValidateSourceView(FormView): url = reverse_lazy('sync:add-source') fields_to_populate = self.prepopulate_fields.get(self.source_type) fields = {} + value = self.key + use_channel_id = ( + 'youtube-channel' == self.source_type_str and + '@' == self.key[0] + ) + if use_channel_id: + old_key = self.key + old_source_type = self.source_type + old_source_type_str = self.source_type_str + + self.source_type_str = 'youtube-channel-id' + self.source_type = self.source_types.get(self.source_type_str, None) + index_url = Source.create_index_url(self.source_type, self.key, 'videos') + try: + self.key = youtube.get_channel_id( + index_url.replace('/channel/', '/') + ) + except youtube.YouTubeError as e: + # It did not work, revert to previous behavior + self.key = old_key + self.source_type = old_source_type + self.source_type_str = old_source_type_str + for field in fields_to_populate: if field == 'source_type': fields[field] = self.source_type - elif field in ('key', 'name', 'directory'): + elif field == 'key': fields[field] = self.key + elif field in ('name', 'directory'): + fields[field] = value return append_uri_params(url, fields) diff --git a/tubesync/sync/youtube.py b/tubesync/sync/youtube.py index cd8a2eec..86e75c0b 100644 --- a/tubesync/sync/youtube.py +++ b/tubesync/sync/youtube.py @@ -46,6 +46,32 @@ def get_yt_opts(): opts.update({'cookiefile': cookie_file_path}) return opts +def get_channel_id(url): + # yt-dlp --simulate --no-check-formats --playlist-items 1 + # --print 'pre_process:%(playlist_channel_id,playlist_id,channel_id)s' + opts = get_yt_opts() + opts.update({ + 'skip_download': True, + 'simulate': True, + 'logger': log, + 'extract_flat': True, # Change to False to get detailed info + 'check_formats': False, + 'playlist_items': '1', + }) + + with 
yt_dlp.YoutubeDL(opts) as y:
+        try:
+            response = y.extract_info(url, download=False)
+        except yt_dlp.utils.DownloadError as e:
+            raise YouTubeError(f'Failed to extract channel ID for "{url}": {e}') from e
+        else:
+            try:
+                channel_id = response['channel_id']
+            except Exception as e:
+                raise YouTubeError(f'Failed to extract channel ID for "{url}": {e}') from e
+            else:
+                return channel_id
+
 def get_channel_image_info(url):
     opts = get_yt_opts()
     opts.update({
@@ -82,6 +108,8 @@ def _subscriber_only(msg='', response=None):
         return True
     if ': Join this channel' in msg:
         return True
+    if 'Join this YouTube channel' in msg:
+        return True
     else:
         # ignore msg entirely
         if not isinstance(response, dict):
diff --git a/tubesync/tubesync/gunicorn.py b/tubesync/tubesync/gunicorn.py
index d59c1389..0058fa65 100644
--- a/tubesync/tubesync/gunicorn.py
+++ b/tubesync/tubesync/gunicorn.py
@@ -10,9 +10,10 @@ def get_num_workers():
         num_workers = int(os.getenv('GUNICORN_WORKERS', 3))
     except ValueError:
         num_workers = cpu_workers
-    if 0 > num_workers > cpu_workers:
-        num_workers = cpu_workers
-    return num_workers
+    if 0 < num_workers < cpu_workers:
+        return num_workers
+    else:
+        return cpu_workers
 
 
 def get_bind():
diff --git a/tubesync/tubesync/local_settings.py.container b/tubesync/tubesync/local_settings.py.container
index e75778b8..0114e76d 100644
--- a/tubesync/tubesync/local_settings.py.container
+++ b/tubesync/tubesync/local_settings.py.container
@@ -87,6 +87,12 @@ SOURCE_DOWNLOAD_DIRECTORY_PREFIX_STR = os.getenv('TUBESYNC_DIRECTORY_PREFIX', 'T
 SOURCE_DOWNLOAD_DIRECTORY_PREFIX = True if SOURCE_DOWNLOAD_DIRECTORY_PREFIX_STR == 'true' else False
 
 
+SHRINK_NEW_MEDIA_METADATA_STR = os.getenv('TUBESYNC_SHRINK_NEW', 'false').strip().lower()
+SHRINK_NEW_MEDIA_METADATA = ( 'true' == SHRINK_NEW_MEDIA_METADATA_STR )
+SHRINK_OLD_MEDIA_METADATA_STR = os.getenv('TUBESYNC_SHRINK_OLD', 'false').strip().lower()
+SHRINK_OLD_MEDIA_METADATA = ( 'true' == SHRINK_OLD_MEDIA_METADATA_STR )
+
+
 VIDEO_HEIGHT_CUTOFF = int(os.getenv("TUBESYNC_VIDEO_HEIGHT_CUTOFF", "240"))
 
 
diff --git a/tubesync/tubesync/wsgi.py b/tubesync/tubesync/wsgi.py
index 71c61003..74912aef 100644
--- a/tubesync/tubesync/wsgi.py
+++ b/tubesync/tubesync/wsgi.py
@@ -1,5 +1,4 @@
 import os
-from urllib.parse import urljoin
 
 from django.core.wsgi import get_wsgi_application
 
@@ -16,10 +15,9 @@ def application(environ, start_response):
     else:
         raise Exception(f'DJANGO_URL_PREFIX must end with a /, '
                         f'got: {DJANGO_URL_PREFIX}')
-    if script_name:
-        static_url = urljoin(script_name, 'static/')
+    if script_name is not None:
         environ['SCRIPT_NAME'] = script_name
         path_info = environ['PATH_INFO']
-        if path_info.startswith(script_name) and not path_info.startswith(static_url):
+        if path_info.startswith(script_name):
             environ['PATH_INFO'] = path_info[len(script_name) - 1:]
     return _application(environ, start_response)
diff --git a/tubesync/upgrade_yt-dlp.sh b/tubesync/upgrade_yt-dlp.sh
new file mode 100755
index 00000000..9da6d555
--- /dev/null
+++ b/tubesync/upgrade_yt-dlp.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+warning_message() {
+    cat <<EOM
+WARNING: This script upgrades the yt-dlp package inside the container.
+Any changes made will be lost when the container is recreated,
+so this is only useful for debugging and testing.
+Set the TUBESYNC_DEBUG variable to confirm that you want to do this.
+EOM
+} 1>&2
+
+pip3() {
+    local pip_runner pip_whl run_whl
+
+    # pipenv
+    pip_runner='/usr/lib/python3/dist-packages/pipenv/patched/pip/__pip-runner__.py'
+    test -s "${pip_runner}" || pip_runner=''
+
+    # python3-pip-whl
+    pip_whl="$(ls -1r /usr/share/python-wheels/pip-*-py3-none-any.whl | head -n 1)"
+    run_whl="${pip_whl}/pip"
+
+    python3 "${pip_runner:-"${run_whl}"}" "$@"
+}
+
+warning_message
+test -n "${TUBESYNC_DEBUG}" || exit 1
+
+# Use the flag added in 23.0.1, if possible.
+# https://github.com/pypa/pip/pull/11780
+break_system_packages='--break-system-packages'
+pip_version="$(pip3 --version | awk '$1 == "pip" { print $2; exit; }')"
+if [[ "${pip_version}" < "23.0.1" ]]; then
+    break_system_packages=''
+fi
+
+pip3 install --upgrade ${break_system_packages} yt-dlp
+
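For completeness, a hedged usage sketch for the new `upgrade_yt-dlp.sh` script: the container name `tubesync` is an assumption, and the `/app/upgrade_yt-dlp.sh` path presumes the `COPY tubesync /app` layout from the Dockerfile above. As the script itself shows, it prints its warning and exits unless `TUBESYNC_DEBUG` is set to a non-empty value.

```bash
# Assumptions: a running container named "tubesync", with the script copied
# to /app by the Dockerfile's "COPY tubesync /app" step.
# TUBESYNC_DEBUG must be non-empty or the script exits after its warning.
docker exec -e TUBESYNC_DEBUG=True tubesync /app/upgrade_yt-dlp.sh
```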