mirror of https://github.com/meeb/tubesync.git
synced 2025-06-23 13:36:35 +00:00

Merge branch 'main' into patch-14
This commit is contained in: commit 3effc2ad96

41 .github/actions/get-tag/action.yml vendored Normal file
@@ -0,0 +1,41 @@
name: Get tag
description: Get tag name from GITHUB_REF environment variable
inputs:
  strip_v:
    required: false
    default: false
    description: Whether to strip "v" from the tag or not
outputs:
  tag:
    value: ${{ steps.set.outputs.tag }}
    description: Git tag name

runs:
  using: 'composite'
  steps:
    - name: Set outputs
      id: 'set'
      env:
        INPUT_STRIP_V: '${{ inputs.strip_v }}'
      shell: 'bash'
      run: |
        tag="${GITHUB_REF}"
        printf -- 'Manipulating string: %s\n' "${tag}"
        test -n "${tag}" || exit 1

        case "${tag}" in
          (refs/tags/*) tag="${tag#refs/tags/}" ;;
          (*) printf -- 'Not a tag ref\n' ; exit 2 ;;
        esac

        if [ 'true' = "${INPUT_STRIP_V,,}" ]
        then
          tag="${tag#[Vv]}"
        fi

        set_sl_var() { local f='%s=%s\n' ; printf -- "${f}" "$@" ; } ;

        set_sl_var tag "${tag}" >> "${GITHUB_OUTPUT}"

        set_sl_var 'tag ' " ${tag}"
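For reference, a minimal Python sketch of the normalization this composite action performs in bash (the function and its name are illustrative, not part of the repository):

```python
def get_tag(ref: str, strip_v: bool = False) -> str:
    """Extract a tag name from a ref such as 'refs/tags/v1.2.3',
    optionally stripping a leading 'v' or 'V', mirroring the
    composite action above."""
    prefix = 'refs/tags/'
    if not ref:
        raise ValueError('empty GITHUB_REF')
    if not ref.startswith(prefix):
        raise ValueError('not a tag ref')
    tag = ref[len(prefix):]
    if strip_v and tag[:1] in ('v', 'V'):
        tag = tag[1:]
    return tag

# get_tag('refs/tags/v1.2.3', strip_v=True) == '1.2.3'
```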
67 .github/workflows/ci.yaml vendored
@@ -7,10 +7,11 @@ on:
  workflow_dispatch:
  push:
    branches:
      - main
      - 'main'
      - 'test-*'
  pull_request:
    branches:
      - main
      - 'main'
    types:
      - opened
      - reopened
@@ -83,7 +84,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
        python-version: ['3.10', '3.11', '3.12', '3.13']
    steps:
      - uses: actions/checkout@v4
      - name: Install Python ${{ matrix.python-version }}
@@ -92,16 +93,63 @@ jobs:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pipenv
          PIPENV_VERBOSITY=64 pipenv install --system --skip-lock
          python -m pip install uv
          uv --no-config --no-managed-python --no-progress \
            pip install --system --strict pipenv
          pipenv lock
          pipenv requirements | tee requirements.txt
          #PIPENV_VERBOSITY=64 pipenv install --system --skip-lock
          uv --no-config --no-managed-python --no-progress \
            pip install --system --strict --requirements requirements.txt
      - name: Set up Django environment
        run: |
          cp -v -p tubesync/tubesync/local_settings.py.example tubesync/tubesync/local_settings.py
          cp -v -a -t "${Python3_ROOT_DIR}"/lib/python3.*/site-packages/background_task/ patches/background_task/*
          cp -v -a -t "${Python3_ROOT_DIR}"/lib/python3.*/site-packages/yt_dlp/ patches/yt_dlp/*
          cd tubesync && python3 -B manage.py collectstatic --no-input --link
      - name: Check with ruff
        continue-on-error: false
        run: |
          target_version='py310'
          ignore_csv_list='E701,E722,E731'
          cd tubesync
          # output formats:
          # "full" | "concise" | "grouped" |
          # "json" | "junit" | "github" | "gitlab" |
          # "pylint" | "azure"
          {
            echo '## Output from `ruff check` for `tubesync`'
            echo ''
            echo '### Formats'
            echo ''
            for fmt in full concise grouped pylint
            do
              echo '<details>'
              echo '<summary>'"${fmt}"'</summary>'
              echo ''
              echo '#### '"${fmt}"' output format'
              echo ''
              echo '```'
              uvx --no-config --no-managed-python --no-progress --isolated \
                ruff check --exit-zero \
                --target-version "${target_version}" \
                --output-format "${fmt}" \
                --extend-select RUF100 \
                --ignore "${ignore_csv_list}"
              echo ''
              echo '```'
              echo ''
              echo '</details>'
              echo ''
            done
          } >> "${GITHUB_STEP_SUMMARY}"
          uvx --no-config --no-managed-python --no-progress --isolated \
            ruff check --exit-zero \
            --target-version "${target_version}" \
            --output-format github \
            --ignore "${ignore_csv_list}"
      - name: Run Django tests
        run: cd tubesync && python3 manage.py test --verbosity=2
        run: cd tubesync && python3 -B -W default manage.py test --verbosity=2

  containerise:
    if: ${{ !cancelled() && 'success' == needs.info.result }}
@@ -135,14 +183,15 @@ jobs:
          push: false
          tags: ghcr.io/${{ needs.info.outputs.lowercase-github-actor }}/${{ env.IMAGE_NAME }}:dive
      - name: Analysis with `dive`
        continue-on-error: false
        run: |
          docker run --rm \
            -v /var/run/docker.sock:/var/run/docker.sock \
            'ghcr.io/wagoodman/dive' \
            'ghcr.io/${{ needs.info.outputs.lowercase-github-actor }}/${{ env.IMAGE_NAME }}:dive' \
            --ci \
            --highestUserWastedPercent '0.03' \
            --highestWastedBytes '10M'
            --highestUserWastedPercent '0.05' \
            --highestWastedBytes '50M'
      - name: Build and push
        id: build-push
        timeout-minutes: 60
88 .github/workflows/release.yaml vendored
@@ -8,31 +8,89 @@ on:
  types: [published]

jobs:
  info:
    runs-on: ubuntu-latest
    outputs:
      ffmpeg-date: ${{ steps.jq.outputs.FFMPEG_DATE }}
      ffmpeg-releases: ${{ steps.ffmpeg.outputs.releases }}
      ffmpeg-version: ${{ steps.jq.outputs.FFMPEG_VERSION }}
      lowercase-github-actor: ${{ steps.github-actor.outputs.lowercase }}
      lowercase-github-repository_owner: ${{ steps.github-repository_owner.outputs.lowercase }}
      tag: ${{ steps.tag.outputs.tag }}
      ytdlp-latest-release: ${{ steps.yt-dlp.outputs.latest-release }}
      ytdlp-releases: ${{ steps.yt-dlp.outputs.releases }}
    steps:
      - uses: actions/checkout@v4
      - name: Get tag
        id: tag
        uses: ./.github/actions/get-tag
      - name: Lowercase github username
        id: github-actor
        uses: ./.github/actions/string-case
        with:
          string: ${{ github.actor }}
      - name: Lowercase github repository owner
        id: github-repository_owner
        uses: ./.github/actions/string-case
        with:
          string: ${{ github.repository_owner }}
      - name: Retrieve yt-dlp/FFmpeg-Builds releases with GitHub CLI
        id: ffmpeg
        uses: ./.github/actions/FFmpeg
      - name: Retrieve yt-dlp/yt-dlp releases with GitHub CLI
        id: yt-dlp
        uses: ./.github/actions/yt-dlp
      - name: Set outputs with jq
        id: jq
        run: |
          cat >| .ffmpeg.releases.json <<'EOF'
          ${{ steps.ffmpeg.outputs.releases }}
          EOF
          mk_delim() { local f='%s_EOF_%d_' ; printf -- "${f}" "$1" "${RANDOM}" ; } ;
          open_ml_var() { local f=''\%'s<<'\%'s\n' ; printf -- "${f}" "$2" "$1" ; } ;
          close_ml_var() { local f='%s\n' ; printf -- "${f}" "$1" ; } ;
          {
            var='FFMPEG_DATE' ;
            delim="$(mk_delim "${var}")" ;
            open_ml_var "${delim}" "${var}" ;
            jq_arg='[foreach .[] as $release ([{}, []]; [ .[0] + {($release.commit): ([ $release.date ] + (.[0][($release.commit)] // []) ) }, [ .[1][0] // $release.commit ] ] ; .[0][(.[1][0])] ) ][-1][0]' ;
            jq -r "${jq_arg}" -- .ffmpeg.releases.json ;
            close_ml_var "${delim}" "${var}" ;

            ffmpeg_date="$( jq -r "${jq_arg}" -- .ffmpeg.releases.json )"

            var='FFMPEG_VERSION' ;
            delim="$(mk_delim "${var}")" ;
            open_ml_var "${delim}" "${var}" ;
            jq_arg='.[]|select(.date == $date)|.versions[]|select(startswith("N-"))' ;
            jq -r --arg date "${ffmpeg_date}" "${jq_arg}" -- .ffmpeg.releases.json ;
            close_ml_var "${delim}" "${var}" ;
            unset -v delim jq_arg var ;
          } >> "${GITHUB_OUTPUT}"
          cat -v "${GITHUB_OUTPUT}"
          rm -v -f .ffmpeg.releases.json

  containerise:
    needs: ['info']
    runs-on: ubuntu-latest
    steps:
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Get tag
        id: tag
        uses: dawidd6/action-get-tag@v1
      - uses: docker/build-push-action@v2
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        uses: docker/setup-buildx-action@v3
      - name: Log into GitHub Container Registry
        run: echo "${{ secrets.REGISTRY_ACCESS_TOKEN }}" | docker login https://ghcr.io -u ${{ github.actor }} --password-stdin
      - name: Lowercase github username for ghcr
        id: string
        uses: ASzc/change-string-case-action@v1
        with:
          string: ${{ github.actor }}
      - name: Build and push
        uses: docker/build-push-action@v2
        uses: docker/build-push-action@v6
        with:
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ghcr.io/${{ steps.string.outputs.lowercase }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.tag }}
          cache-from: type=registry,ref=ghcr.io/${{ steps.string.outputs.lowercase }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.tag }}
          cache-to: type=inline
          tags: ghcr.io/${{ needs.info.outputs.lowercase-github-actor }}/${{ env.IMAGE_NAME }}:${{ needs.info.outputs.tag }}
          cache-from: |
            type=registry,ref=ghcr.io/${{ needs.info.outputs.lowercase-github-repository_owner }}/${{ env.IMAGE_NAME }}:latest
            type=gha
          build-args: |
            IMAGE_NAME=${{ env.IMAGE_NAME }}
            FFMPEG_DATE=${{ needs.info.outputs.ffmpeg-date }}
            FFMPEG_VERSION=${{ needs.info.outputs.ffmpeg-version }}
            YTDLP_DATE=${{ fromJSON(needs.info.outputs.ytdlp-latest-release).tag.name }}
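The `FFMPEG_DATE` jq fold above is dense; a rough Python equivalent of what it computes may help (the `commit` and `date` field names come from the jq expression itself; this helper is illustrative only):

```python
def ffmpeg_date(releases: list) -> str | None:
    """Collect every date seen for the commit of the first release
    entry; the jq fold keeps the most recently prepended date, which
    is the date of the last matching entry processed."""
    if not releases:
        return None
    first_commit = releases[0]['commit']
    dates = [r['date'] for r in releases if r['commit'] == first_commit]
    return dates[-1] if dates else None
```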
77 Dockerfile
@@ -24,6 +24,7 @@ FROM debian:${DEBIAN_VERSION} AS tubesync-base
ARG TARGETARCH

ENV DEBIAN_FRONTEND="noninteractive" \
    APT_KEEP_ARCHIVES=1 \
    HOME="/root" \
    LANGUAGE="en_US.UTF-8" \
    LANG="en_US.UTF-8" \
@@ -39,17 +40,46 @@ RUN --mount=type=cache,id=apt-lib-cache-${TARGETARCH},sharing=private,target=/va
    rm -f /var/cache/apt/*cache.bin ; \
    # Update from the network and keep cache
    rm -f /etc/apt/apt.conf.d/docker-clean ; \
    # Do not generate more /var/cache/apt/*cache.bin files
    # hopefully soon, this will be included in Debian images
    printf -- >| /etc/apt/apt.conf.d/docker-disable-pkgcache \
        'Dir::Cache::%spkgcache "";\n' '' src ; \
    chmod a+r /etc/apt/apt.conf.d/docker-disable-pkgcache ; \
    set -x && \
    apt-get update && \
    # Install locales
    LC_ALL='C.UTF-8' LANG='C.UTF-8' LANGUAGE='C.UTF-8' \
    apt-get -y --no-install-recommends install locales && \
    # localedef -v -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 && \
    printf -- "en_US.UTF-8 UTF-8\n" > /etc/locale.gen && \
    locale-gen en_US.UTF-8 && \
    locale-gen && \
    # Clean up
    apt-get -y autopurge && \
    apt-get -y autoclean && \
    rm -f /var/cache/debconf/*.dat-old

FROM alpine:${ALPINE_VERSION} AS openresty-debian
ARG TARGETARCH
ARG DEBIAN_VERSION
ADD 'https://openresty.org/package/pubkey.gpg' '/downloaded/pubkey.gpg'
RUN set -eu ; \
    decide_arch() { \
        case "${TARGETARCH}" in \
            (amd64) printf -- '' ;; \
            (arm64) printf -- 'arm64/' ;; \
        esac ; \
    } ; \
    set -x ; \
    mkdir -v -p '/etc/apt/trusted.gpg.d' && \
    apk --no-cache --no-progress add cmd:gpg2 && \
    gpg2 --dearmor \
        -o '/etc/apt/trusted.gpg.d/openresty.gpg' \
        < '/downloaded/pubkey.gpg' && \
    mkdir -v -p '/etc/apt/sources.list.d' && \
    printf -- >| '/etc/apt/sources.list.d/openresty.list' \
        'deb http://openresty.org/package/%sdebian %s openresty' \
        "$(decide_arch)" "${DEBIAN_VERSION%-slim}"

FROM alpine:${ALPINE_VERSION} AS ffmpeg-download
ARG FFMPEG_DATE
ARG FFMPEG_VERSION
@@ -215,21 +245,22 @@ RUN set -eu ; \
    case "${arg1}" in \
        (amd64) printf -- 'x86_64' ;; \
        (arm64) printf -- 'aarch64' ;; \
        (armv7l) printf -- 'arm' ;; \
        (arm|armv7l) printf -- 'armhf' ;; \
        (*) printf -- '%s' "${arg1}" ;; \
    esac ; \
    unset -v arg1 ; \
    } ; \
    \
    file_ext="${CHECKSUM_ALGORITHM}" ; \
    apk --no-cache --no-progress add "cmd:${CHECKSUM_ALGORITHM}sum" ; \
    mkdir -v /verified ; \
    cd /downloaded ; \
    for f in *.sha256 ; \
    for f in *."${file_ext}" ; \
    do \
        "${CHECKSUM_ALGORITHM}sum" --check --warn --strict "${f}" || exit ; \
        ln -v "${f%.sha256}" /verified/ || exit ; \
        ln -v "${f%.${file_ext}}" /verified/ || exit ; \
    done ; \
    unset -v f ; \
    unset -v f file_ext ; \
    \
    S6_ARCH="$(decide_arch "${TARGETARCH}")" ; \
    set -x ; \
@@ -248,7 +279,38 @@ RUN set -eu ; \
FROM scratch AS s6-overlay
COPY --from=s6-overlay-extracted /s6-overlay-rootfs /

FROM tubesync-base AS tubesync
FROM tubesync-base AS tubesync-openresty

COPY --from=openresty-debian \
    /etc/apt/trusted.gpg.d/openresty.gpg /etc/apt/trusted.gpg.d/openresty.gpg
COPY --from=openresty-debian \
    /etc/apt/sources.list.d/openresty.list /etc/apt/sources.list.d/openresty.list

RUN --mount=type=cache,id=apt-lib-cache-${TARGETARCH},sharing=private,target=/var/lib/apt \
    --mount=type=cache,id=apt-cache-cache,sharing=private,target=/var/cache/apt \
    set -x && \
    apt-get update && \
    apt-get -y --no-install-recommends install nginx-common openresty && \
    # Clean up
    apt-get -y autopurge && \
    apt-get -y autoclean && \
    rm -v -f /var/cache/debconf/*.dat-old

FROM tubesync-base AS tubesync-nginx

RUN --mount=type=cache,id=apt-lib-cache-${TARGETARCH},sharing=private,target=/var/lib/apt \
    --mount=type=cache,id=apt-cache-cache,sharing=private,target=/var/cache/apt \
    set -x && \
    apt-get update && \
    apt-get -y --no-install-recommends install nginx-light && \
    # openresty binary should still work
    ln -v -s -T ../sbin/nginx /usr/bin/openresty && \
    # Clean up
    apt-get -y autopurge && \
    apt-get -y autoclean && \
    rm -v -f /var/cache/debconf/*.dat-old

FROM tubesync-openresty AS tubesync

ARG S6_VERSION
@@ -273,7 +335,6 @@ RUN --mount=type=cache,id=apt-lib-cache-${TARGETARCH},sharing=private,target=/va
        libmariadb3 \
        libpq5 \
        libwebp7 \
        nginx-light \
        pipenv \
        pkgconf \
        python3 \
@@ -397,7 +458,7 @@ RUN set -x && \
    mkdir -v -p /downloads/audio && \
    mkdir -v -p /downloads/video && \
    # Check nginx configuration copied from config/root/etc
    nginx -t && \
    openresty -c /etc/nginx/nginx.conf -e stderr -t && \
    # Append software versions
    ffmpeg_version=$(/usr/local/bin/ffmpeg -version | awk -v 'ev=31' '1 == NR && "ffmpeg" == $1 { print $3; ev=0; } END { exit ev; }') && \
    test -n "${ffmpeg_version}" && \
4 Pipfile
@@ -7,7 +7,7 @@ verify_ssl = true
autopep8 = "*"

[packages]
django = "<5.2"
django = "~=5.2.1"
django-sass-processor = {extras = ["management-command"], version = "*"}
pillow = "*"
whitenoise = "*"
@@ -24,3 +24,5 @@ yt-dlp = {extras = ["default", "curl-cffi"], version = "*"}
emoji = "*"
brotli = "*"
html5lib = "*"
yt-dlp-get-pot = "*"
bgutil-ytdlp-pot-provider = "*"
91 README.md
@@ -63,7 +63,7 @@ directory will be a `video` and `audio` subdirectories. All media which only has
audio stream (such as music) will download to the `audio` directory. All media with a
video stream will be downloaded to the `video` directory. All administration of
TubeSync is performed via a web interface. You can optionally add a media server,
currently just Plex, to complete the PVR experience.
currently only Jellyfin or Plex, to complete the PVR experience.


# Installation
@@ -146,7 +146,7 @@ services:

## Optional authentication

Available in `v1.0` (or `:latest`)and later. If you want to enable a basic username and
Available in `v1.0` (or `:latest`) and later. If you want to enable a basic username and
password to be required to access the TubeSync dashboard you can set them with the
following environment variables:

@@ -188,6 +188,14 @@ $ docker pull ghcr.io/meeb/tubesync:v[number]

Back-end updates such as database migrations should be automatic.

> [!IMPORTANT]
> `MariaDB` was not automatically upgraded for `UUID` column types.
> To see what changes are needed, you can run:
> ```bash
> docker exec -it tubesync python3 /app/manage.py fix-mariadb --dry-run --uuid-columns
> ```
> Removing the `--dry-run` will attempt to execute those statements using the configured database connection.


# Moving, backing up, etc.
@@ -221,7 +229,7 @@ As media is indexed and downloaded it will appear in the "media" tab.

### 3. Media Server updating

Currently TubeSync supports Plex as a media server. You can add your local Plex server
Currently TubeSync supports Plex and Jellyfin as media servers. You can add your local Jellyfin or Plex server
under the "media servers" tab.


@@ -234,6 +242,13 @@ view these with:

$ docker logs --follow tubesync
```

To include logs with an issue report, please extract the logs to a file and attach it to the issue.
The command below creates the `TubeSync.logs.txt` file with the logs from the `tubesync` container:

```bash
docker logs -t tubesync > TubeSync.logs.txt 2>&1
```


# Advanced usage guides
@@ -250,7 +265,15 @@ and less common features:

# Warnings

### 1. Index frequency
### 1. Automated file renaming
> [!IMPORTANT]
> Currently, file renaming is not enabled by default.
> Enabling this feature by default is planned in an upcoming release, after `2025-06-01`.
>
> To prevent your installation from scheduling media file renaming tasks,
> you must set [`TUBESYNC_RENAME_ALL_SOURCES=False`](#advanced-configuration) in the environment variables or `RENAME_ALL_SOURCES = False` in [`settings.py`](../1fc0462c11741621350053144ab19cba5f266cb2/tubesync/tubesync/settings.py#L183).

### 2. Index frequency

It's a good idea to add sources with as long of an index frequency as possible. This is
the duration between indexes of the source. An index is when TubeSync checks to see
@@ -258,21 +281,21 @@ what videos available on a channel or playlist to find new media. Try and keep t
long as possible, up to 24 hours.


### 2. Indexing massive channels
### 3. Indexing massive channels

If you add a massive (several thousand videos) channel to TubeSync and choose "index
every hour" or similar short interval it's entirely possible your TubeSync install may
spend its entire time just indexing the massive channel over and over again without
If you add a massive channel (one with several thousand videos) to TubeSync and choose "index
every hour" or a similarly short interval, it's entirely possible that your TubeSync install may
spend its entire time indexing the channel, over and over again, without
downloading any media. Check your tasks for the status of your TubeSync install.

If you add a significant amount of "work" due to adding many large channels you may
need to increase the number of background workers by setting the `TUBESYNC_WORKERS`
environment variable. Try around ~4 at most, although the absolute maximum allowed is 8.

**Be nice.** it's likely entirely possible your IP address could get throttled by the
source if you try and crawl extremely large amounts very quickly. **Try and be polite
**Be nice.** It's entirely possible that your IP address could get throttled and/or banned by the
source if you try to crawl extremely large amounts quickly. **Try to be polite
with the smallest amount of indexing and concurrent downloads possible for your needs.**

Only if you absolutely must should you increase [`TUBESYNC_WORKERS`](#advanced-configuration) above its default value.
The maximum the software accepts is `8` threads per queue worker process.
By default, up to `3` tasks will be executing concurrently.
The maximum is `24` concurrent tasks.

# FAQ
@@ -334,7 +357,7 @@ and you can probably break things by playing in the admin. If you still want to
it you can run:

```bash
$ docker exec -ti tubesync python3 /app/manage.py createsuperuser
$ docker exec -it tubesync python3 /app/manage.py createsuperuser
```

And follow the instructions to create an initial Django superuser, once created, you
@@ -371,22 +394,26 @@ There are a number of other environment variables you can set. These are, mostly
**NOT** required to be set in the default container installation, they are really only
useful if you are manually installing TubeSync in some other environment. These are:

| Name | What | Example |
| ---------------------------- | ------------------------------------------------------------- |--------------------------------------|
| DJANGO_SECRET_KEY | Django's SECRET_KEY | YJySXnQLB7UVZw2dXKDWxI5lEZaImK6l |
| DJANGO_URL_PREFIX | Run TubeSync in a sub-URL on the web server | /somepath/ |
| TUBESYNC_DEBUG | Enable debugging | True |
| TUBESYNC_WORKERS | Number of background workers, default is 2, max allowed is 8 | 2 |
| TUBESYNC_HOSTS | Django's ALLOWED_HOSTS, defaults to `*` | tubesync.example.com,otherhost.com |
| TUBESYNC_RESET_DOWNLOAD_DIR | Toggle resetting `/downloads` permissions, defaults to True | True |
| TUBESYNC_VIDEO_HEIGHT_CUTOFF | Smallest video height in pixels permitted to download | 240 |
| TUBESYNC_DIRECTORY_PREFIX | Enable `video` and `audio` directory prefixes in `/downloads` | True |
| GUNICORN_WORKERS | Number of gunicorn workers to spawn | 3 |
| LISTEN_HOST | IP address for gunicorn to listen on | 127.0.0.1 |
| LISTEN_PORT | Port number for gunicorn to listen on | 8080 |
| HTTP_USER | Sets the username for HTTP basic authentication | some-username |
| HTTP_PASS | Sets the password for HTTP basic authentication | some-secure-password |
| DATABASE_CONNECTION | Optional external database connection details | mysql://user:pass@host:port/database |
| Name | What | Example |
| ---------------------------- | ------------------------------------------------------------- |-------------------------------------------------------------------------------|
| DJANGO_SECRET_KEY | Django's SECRET_KEY | YJySXnQLB7UVZw2dXKDWxI5lEZaImK6l |
| DJANGO_URL_PREFIX | Run TubeSync in a sub-URL on the web server | /somepath/ |
| TUBESYNC_DEBUG | Enable debugging | True |
| TUBESYNC_HOSTS | Django's ALLOWED_HOSTS, defaults to `*` | tubesync.example.com,otherhost.com |
| TUBESYNC_RESET_DOWNLOAD_DIR | Toggle resetting `/downloads` permissions, defaults to True | True |
| TUBESYNC_VIDEO_HEIGHT_CUTOFF | Smallest video height in pixels permitted to download | 240 |
| TUBESYNC_RENAME_SOURCES | Rename media files from selected sources | Source1_directory,Source2_directory |
| TUBESYNC_RENAME_ALL_SOURCES | Rename media files from all sources | True |
| TUBESYNC_DIRECTORY_PREFIX | Enable `video` and `audio` directory prefixes in `/downloads` | True |
| TUBESYNC_SHRINK_NEW | Filter unneeded information from newly retrieved metadata | True |
| TUBESYNC_SHRINK_OLD | Filter unneeded information from metadata loaded from the database | True |
| TUBESYNC_WORKERS | Number of background threads per (task runner) process. Default is 1. Max allowed is 8. | 2 |
| GUNICORN_WORKERS | Number of `gunicorn` (web request) workers to spawn | 3 |
| LISTEN_HOST | IP address for `gunicorn` to listen on | 127.0.0.1 |
| LISTEN_PORT | Port number for `gunicorn` to listen on | 8080 |
| HTTP_USER | Sets the username for HTTP basic authentication | some-username |
| HTTP_PASS | Sets the password for HTTP basic authentication | some-secure-password |
| DATABASE_CONNECTION | Optional external database connection details | postgresql://user:pass@host:port/database |


# Manual, non-containerised, installation
@@ -396,7 +423,7 @@ following this rough guide, you are on your own and should be knowledgeable abou
installing and running WSGI-based Python web applications before attempting this.

1. Clone or download this repo
2. Make sure you're running a modern version of Python (>=3.6) and have Pipenv
2. Make sure you're running a modern version of Python (>=3.10) and have Pipenv
   installed
3. Set up the environment with `pipenv install`
4. Copy `tubesync/tubesync/local_settings.py.example` to
@@ -5,6 +5,8 @@ worker_processes auto;
worker_cpu_affinity auto;
pid /run/nginx.pid;

env YT_POT_BGUTIL_BASE_URL;

events {
    worker_connections 1024;
}
@@ -135,4 +137,7 @@ http {
        }
    }

    # Proof-of-Origin Token Server
    include /etc/nginx/token_server.conf;

}
29 config/root/etc/nginx/token_server.conf Normal file
@@ -0,0 +1,29 @@
upstream token_server {
    server 127.0.0.2:4416 down;
}

server {

    # Ports
    listen 4416;
    listen [::]:4416;

    # Server domain name
    server_name _;

    set_by_lua_block $pot_url {
        local default = 'http://token_server'
        local url = os.getenv('YT_POT_BGUTIL_BASE_URL')
        if not url then
            return default
        end
        if #url and url:find('://') then
            return url
        end
        return default
    }

    location / {
        proxy_pass $pot_url;
    }
}
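The `set_by_lua_block` above only trusts the environment variable when it looks like an absolute URL; in Python the same guard would read as follows (a sketch, not project code):

```python
import os

def pot_base_url(default: str = 'http://token_server') -> str:
    # Use YT_POT_BGUTIL_BASE_URL only when it is set and contains a
    # scheme separator; otherwise fall back to the local upstream.
    url = os.getenv('YT_POT_BGUTIL_BASE_URL')
    if url and '://' in url:
        return url
    return default
```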
@@ -2,4 +2,15 @@

cd /

exec /usr/sbin/nginx
https="${TUBESYNC_POT_HTTPS:+https}"
ip_address="${TUBESYNC_POT_IPADDR:-${POTSERVER_PORT_4416_TCP_ADDR}}"
: "${TUBESYNC_POT_PORT:=${POTSERVER_PORT_4416_TCP_PORT}}"
port="${TUBESYNC_POT_PORT:+:}${TUBESYNC_POT_PORT}"

if [ -n "${ip_address}" ]
then
    YT_POT_BGUTIL_BASE_URL="${https:-http}://${ip_address}${port}"
    export YT_POT_BGUTIL_BASE_URL
fi

exec /usr/bin/openresty -c /etc/nginx/nginx.conf -e stderr
@@ -2,4 +2,5 @@

exec nice -n "${TUBESYNC_NICE:-1}" s6-setuidgid app \
    /usr/bin/python3 /app/manage.py process_tasks \
    --queue database
    --queue database --duration 86400 \
    --sleep "30.${RANDOM}"
@@ -2,4 +2,5 @@

exec nice -n "${TUBESYNC_NICE:-1}" s6-setuidgid app \
    /usr/bin/python3 /app/manage.py process_tasks \
    --queue filesystem
    --queue filesystem --duration 43200 \
    --sleep "20.${RANDOM}"
@@ -25,6 +25,13 @@ then
    chmod -R 0755 /downloads
fi

if [ 'True' = "${TUBESYNC_DEBUG:-False}" ]
then
    s6-setuidgid app \
        /usr/bin/python3 /app/manage.py \
        showmigrations -v 3 --list
fi

# Run migrations
exec s6-setuidgid app \
    /usr/bin/python3 /app/manage.py migrate
@@ -2,4 +2,5 @@

exec nice -n "${TUBESYNC_NICE:-1}" s6-setuidgid app \
    /usr/bin/python3 /app/manage.py process_tasks \
    --queue network
    --queue network --duration 43200 \
    --sleep "10.${RANDOM}"
@@ -18,22 +18,73 @@ reset your database. If you are comfortable with Django you can export and re-im
existing database data with:

```bash
$ docker exec -i tubesync python3 /app/manage.py dumpdata > some-file.json
# Stop services
$ docker exec -t tubesync \
    bash -c 'for svc in \
        /run/service/{gunicorn,tubesync*-worker} ; \
        do \
            /command/s6-svc -wd -D "${svc}" ; \
        done'
# Backup the database into a compressed file
$ docker exec -t tubesync \
    python3 /app/manage.py \
    dumpdata --format jsonl \
    --exclude background_task \
    --output /downloads/tubesync-database-backup.jsonl.xz
```

Then change you database backend over, then use
Writing the compressed backup file to your `/downloads/` makes sense, as long as that directory is still available after destroying the current container.
If you have a configuration where that file will be deleted, choose a different place to store the output (perhaps `/config/`, if it has sufficient storage available) and place the file there instead.

You can also copy the file from the container to the local filesystem (`/tmp/` in this example) with:

```bash
$ cat some-file.json | docker exec -i tubesync python3 /app/manage.py loaddata - --format=json
$ docker cp \
    tubesync:/downloads/tubesync-database-backup.jsonl.xz \
    /tmp/
```

If you use `-` as the destination, then `docker cp` provides a `tar` archive.

After you have changed your database backend over, then use:

```bash
# Stop services
$ docker exec -t tubesync \
    bash -c 'for svc in \
        /run/service/{gunicorn,tubesync*-worker} ; \
        do \
            /command/s6-svc -wd -D "${svc}" ; \
        done'
# Load fixture file into the database
$ docker exec -t tubesync \
    python3 /app/manage.py \
    loaddata /downloads/tubesync-database-backup.jsonl.xz
```

Or, if you only have the copy in `/tmp/`, then you would use:
```bash
# Stop services
$ docker exec -t tubesync \
    bash -c 'for svc in \
        /run/service/{gunicorn,tubesync*-worker} ; \
        do \
            /command/s6-svc -wd -D "${svc}" ; \
        done'
# Load fixture data from standard input into the database
$ xzcat /tmp/tubesync-database-backup.jsonl.xz | \
    docker exec -i tubesync \
    python3 /app/manage.py \
    loaddata --format=jsonl -
```

As detailed in the Django documentation:

https://docs.djangoproject.com/en/3.1/ref/django-admin/#dumpdata
https://docs.djangoproject.com/en/5.1/ref/django-admin/#dumpdata

and:

https://docs.djangoproject.com/en/3.1/ref/django-admin/#loaddata
https://docs.djangoproject.com/en/5.1/ref/django-admin/#loaddata

Further instructions are beyond the scope of TubeSync documentation and you should refer
to Django documentation for more details.
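The `dumpdata`/`loaddata` pair above also works from a Django shell inside the container. A minimal sketch (same options as the shell commands; assumes Django is configured as in the container):

```python
from django.core.management import call_command

# Write a compressed JSONL fixture, excluding the task queue app,
# mirroring the docker exec command above.
call_command(
    'dumpdata',
    format='jsonl',
    exclude=['background_task'],
    output='/downloads/tubesync-database-backup.jsonl.xz',
)
```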
@@ -94,13 +145,13 @@ the DB for the performance benefits, a configuration like this would be enough:

```
tubesync-db:
  image: postgres:15.2
  image: postgres:17
  container_name: tubesync-db
  restart: unless-stopped
  volumes:
    - /<path/to>/init.sql:/docker-entrypoint-initdb.d/init.sql
    - /<path/to>/tubesync-db:/var/lib/postgresql/data
  environment:
    - POSTGRES_DB=tubesync
    - POSTGRES_USER=postgres
    - POSTGRES_PASSWORD=testpassword

@@ -118,15 +169,3 @@ the DB for the performance benefits, a configuration like this would be enough:
  depends_on:
    - tubesync-db
```

Note that an `init.sql` file is needed to initialize the `tubesync`
database before it can be written to. This file should contain:

```
CREATE DATABASE tubesync;
```


Then it must be mapped to `/docker-entrypoint-initdb.d/init.sql` for it
to be executed on first startup of the container. See the `tubesync-db`
volume mapping above for how to do this.
@@ -22,6 +22,13 @@ class NoMetadataException(Exception):
    pass


class NoThumbnailException(Exception):
    '''
        Raised when a thumbnail was not found at the remote URL.
    '''
    pass


class DownloadFailedException(Exception):
    '''
        Raised when a downloaded media file is expected to be present, but doesn't
16 tubesync/common/json.py Normal file
@@ -0,0 +1,16 @@
from django.core.serializers.json import DjangoJSONEncoder


class JSONEncoder(DjangoJSONEncoder):
    item_separator = ','
    key_separator = ':'

    def default(self, obj):
        try:
            iterable = iter(obj)
        except TypeError:
            pass
        else:
            return list(iterable)
        return super().default(obj)
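A quick usage sketch for the encoder above (the `common.json` import path matches this new file; the sample data is illustrative):

```python
import json

from common.json import JSONEncoder

# Objects that are iterable but not natively JSON-serializable
# (sets, ranges, generators) are converted to lists by default(),
# and the class-level separators produce compact output.
data = {'ids': range(3)}
print(json.dumps(data, cls=JSONEncoder))  # {"ids":[0,1,2]}
```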
29 tubesync/common/timestamp.py Normal file
@@ -0,0 +1,29 @@
import datetime


utc_tz = datetime.timezone.utc
posix_epoch = datetime.datetime.fromtimestamp(0, utc_tz)


def add_epoch(seconds):
    assert seconds is not None
    assert seconds >= 0, 'seconds must be a positive number'

    return datetime.timedelta(seconds=seconds) + posix_epoch

def subtract_epoch(arg_dt, /):
    assert isinstance(arg_dt, datetime.datetime)
    utc_dt = arg_dt.astimezone(utc_tz)

    return utc_dt - posix_epoch

def datetime_to_timestamp(arg_dt, /, *, integer=True):
    timestamp = subtract_epoch(arg_dt).total_seconds()

    if not integer:
        return timestamp
    return round(timestamp)

def timestamp_to_datetime(seconds, /):
    return add_epoch(seconds=seconds).astimezone(utc_tz)
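A round-trip sketch using the helpers above (the timestamp value is illustrative):

```python
from common.timestamp import datetime_to_timestamp, timestamp_to_datetime

dt = timestamp_to_datetime(1700000000)        # timezone-aware UTC datetime
assert datetime_to_timestamp(dt) == 1700000000
# Keep fractional seconds by disabling rounding:
print(datetime_to_timestamp(dt, integer=False))  # 1700000000.0
```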
@@ -1,11 +1,13 @@
import cProfile
import emoji
import gc
import io
import os
import pstats
import string
import time
from datetime import datetime
from django.core.paginator import Paginator
from urllib.parse import urlunsplit, urlencode, urlparse
from yt_dlp.utils import LazyList
from .errors import DatabaseConnectionError
@@ -222,3 +224,36 @@ def remove_enclosed(haystack, /, open='[', close=']', sep=' ', *, valid=None, st
        return haystack
    return haystack[:o] + haystack[len(n)+c:]


def django_queryset_generator(query_set, /, *,
    page_size=100,
    chunk_size=None,
    use_chunked_fetch=False,
):
    qs = query_set.values_list('pk', flat=True)
    # Avoid the `UnorderedObjectListWarning`
    if not query_set.ordered:
        qs = qs.order_by('pk')
    collecting = gc.isenabled()
    gc.disable()
    if use_chunked_fetch:
        for key in qs._iterator(use_chunked_fetch, chunk_size):
            yield query_set.filter(pk=key)[0]
            key = None
            gc.collect(generation=1)
        key = None
    else:
        for page in iter(Paginator(qs, page_size)):
            for key in page.object_list:
                yield query_set.filter(pk=key)[0]
                key = None
                gc.collect(generation=1)
            key = None
            page = None
            gc.collect()
        page = None
    qs = None
    gc.collect()
    if collecting:
        gc.enable()
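A usage sketch for the generator above (the `downloaded` field exists on `Media`, as seen elsewhere in this commit; the query itself is illustrative):

```python
from common.utils import django_queryset_generator as qs_gen
from sync.models import Media

# Walks the queryset by primary key in pages of 100, re-fetching one
# row at a time and nudging the garbage collector, so memory stays
# flat even on very large tables.
for media in qs_gen(Media.objects.filter(downloaded=True)):
    print(media.pk)
```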
26 tubesync/restart_services.sh Executable file
@@ -0,0 +1,26 @@
#!/usr/bin/env sh

dir='/run/service'
svc_path() (
    cd "${dir}"
    realpath -e -s "$@"
)

if [ 0 -eq $# ]
then
    set -- \
        $( cd "${dir}" && svc_path tubesync*-worker ) \
        "$( svc_path gunicorn )" \
        "$( svc_path nginx )"
fi

for service in $( svc_path "$@" )
do
    printf -- 'Restarting %-28s' "${service#${dir}/}..."
    _began="$( date '+%s' )"
    /command/s6-svc -wr -r "${service}"
    _ended="$( date '+%s' )"
    printf -- '\tcompleted (in %2.1d seconds).\n' \
        "$( expr "${_ended}" - "${_began}" )"
done
unset -v _began _ended service
@@ -1,5 +1,11 @@
from django.contrib import admin
from .models import Source, Media, MediaServer
from .models import (
    Source,
    Media,
    Metadata,
    MetadataFormat,
    MediaServer
)


@admin.register(Source)
@@ -21,6 +27,24 @@ class MediaAdmin(admin.ModelAdmin):
    search_fields = ('uuid', 'source__key', 'key')


@admin.register(Metadata)
class MetadataAdmin(admin.ModelAdmin):

    ordering = ('-retrieved', '-created', '-uploaded')
    list_display = ('uuid', 'key', 'retrieved', 'uploaded', 'created', 'site')
    readonly_fields = ('uuid', 'created', 'retrieved')
    search_fields = ('uuid', 'media__uuid', 'key')


@admin.register(MetadataFormat)
class MetadataFormatAdmin(admin.ModelAdmin):

    ordering = ('site', 'key', 'number')
    list_display = ('uuid', 'key', 'site', 'number', 'metadata')
    readonly_fields = ('uuid', 'metadata', 'site', 'key', 'number')
    search_fields = ('uuid', 'metadata__uuid', 'metadata__media__uuid', 'key')


@admin.register(MediaServer)
class MediaServerAdmin(admin.ModelAdmin):
@@ -8,6 +8,7 @@ DOMAINS = dict({
    'youtube': frozenset({
        'youtube.com',
        'm.youtube.com',
        'music.youtube.com',
        'www.youtube.com',
    }),
})
@@ -75,6 +75,26 @@ class CommaSepChoiceField(models.CharField):

    # Override these functions to prevent unwanted behaviors
    def to_python(self, value):
        saved_value = None
        arg_was_none = True if value is None else False
        if isinstance(value, CommaSepChoice):
            return value.selected_choices
        if isinstance(value, list) and len(value) > 0 and value[0].startswith('CommaSepChoice('):
            saved_value = value
            value = ''.join(value)
        if isinstance(value, str) and value.startswith('CommaSepChoice('):
            r = value.replace('CommaSepChoice(', 'dict(', 1)
            try:
                o = eval(r)
            except Exception:
                pass
            else:
                return o.get('selected_choices')
        if arg_was_none:
            value = None
        elif saved_value is not None:
            value = saved_value
        self.log.debug(f'CommaSepChoiceField: to_python: was called with: {value!r}')
        return value

    def get_internal_type(self):
@@ -128,8 +148,13 @@ class CommaSepChoiceField(models.CharField):
        '''
            Create a data structure to be used in Python code.
        '''
        # possibly not useful?
        if isinstance(value, CommaSepChoice):
            value = value.selected_choices
        # normally strings
        if isinstance(value, str) and len(value) > 0:
            value = value.split(self.separator)
        # empty string and None, or whatever
        if not isinstance(value, list):
            value = list()
        self.selected_choices = value
@@ -154,6 +179,15 @@ class CommaSepChoiceField(models.CharField):
        if set(s_value) != set(value):
            self.log.warn(f'CommaSepChoiceField:get_prep_value: values did not match. '
                          f'CommaSepChoiceField({value}) versus CharField({s_value})')
        return self.__class__._tuple__str__(data)


    # extra functions not used by any parent classes
    @staticmethod
    def _tuple__str__(data):
        if not isinstance(data, CommaSepChoice):
            return data
        value = data.selected_choices
        if not isinstance(value, list):
            return ''
        if data.all_choice in value:
@@ -161,7 +195,6 @@ class CommaSepChoiceField(models.CharField):
        ordered_unique = list(dict.fromkeys(value))
        return data.separator.join(ordered_unique)

    # extra functions not used by any parent classes
    def get_all_choices(self):
        choice_list = list()
        if self.possible_choices is None:
@@ -174,3 +207,6 @@ class CommaSepChoiceField(models.CharField):

        return choice_list


CommaSepChoice.__str__ = CommaSepChoiceField._tuple__str__
@@ -1,8 +1,14 @@

from django import forms
from django import forms, VERSION as DJANGO_VERSION
from django.utils.translation import gettext_lazy as _


if DJANGO_VERSION[0:3] < (5, 0, 0):
    _assume_scheme = dict()
else:
    # Silence RemovedInDjango60Warning
    _assume_scheme = dict(assume_scheme='http')

class ValidateSourceForm(forms.Form):

    source_type = forms.CharField(
@@ -12,7 +18,8 @@ class ValidateSourceForm(forms.Form):
    )
    source_url = forms.URLField(
        label=_('Source URL'),
        required=True
        required=True,
        **_assume_scheme,
    )


@@ -44,10 +51,33 @@ class ResetTasksForm(forms.Form):
    pass


class ScheduleTaskForm(forms.Form):

    now = forms.DateTimeField(
        label=_('The current date and time'),
        required=False,
        widget=forms.DateTimeInput(
            attrs={
                'type': 'datetime-local',
                'readonly': 'true',
            },
        ),
    )

    when = forms.DateTimeField(
        label=_('When the task should run'),
        required=True,
        widget=forms.DateTimeInput(
            attrs={'type': 'datetime-local'},
        ),
    )


class ConfirmDeleteMediaServerForm(forms.Form):

    pass


_media_server_type_label = 'Jellyfin'
class JellyfinMediaServerForm(forms.Form):
@@ -2,7 +2,7 @@ import os
import uuid
from django.utils.translation import gettext_lazy as _
from django.core.management.base import BaseCommand, CommandError
from django.db.models import signals
from django.db.transaction import atomic
from common.logger import log
from sync.models import Source, Media, MediaServer
from sync.tasks import schedule_media_servers_update
@@ -10,10 +10,10 @@ from sync.tasks import schedule_media_servers_update

class Command(BaseCommand):

    help = ('Deletes a source by UUID')
    help = _('Deletes a source by UUID')

    def add_arguments(self, parser):
        parser.add_argument('--source', action='store', required=True, help='Source UUID')
        parser.add_argument('--source', action='store', required=True, help=_('Source UUID'))

    def handle(self, *args, **options):
        source_uuid_str = options.get('source', '')
@@ -29,13 +29,15 @@ class Command(BaseCommand):
            raise CommandError(f'Source does not exist with '
                               f'UUID: {source_uuid}')
        # Reconfigure the source to not update the disk or media servers
        source.deactivate()
        with atomic(durable=True):
            source.deactivate()
        # Delete the source, triggering pre-delete signals for each media item
        log.info(f'Found source with UUID "{source.uuid}" with name '
                 f'"{source.name}" and deleting it, this may take some time!')
        log.info(f'Source directory: {source.directory_path}')
        source.delete()
        # Update any media servers
        schedule_media_servers_update()
        with atomic(durable=True):
            source.delete()
            # Update any media servers
            schedule_media_servers_update()
        # All done
        log.info('Done')
256 tubesync/sync/management/commands/fix-mariadb.py Normal file
@@ -0,0 +1,256 @@
from django import db
from io import BytesIO, TextIOWrapper
from django.utils.translation import gettext_lazy
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from common.logger import log


db_tables = db.connection.introspection.table_names
db_quote_name = db.connection.ops.quote_name
new_tables = {
    'sync_media_metadata_format',
    'sync_media_metadata',
    'sync_metadataformat',
    'sync_metadata',
}
sql_statements = db.connection.ops.prepare_sql_script

def _(arg_str):
    return str(gettext_lazy(arg_str))

def SQLTable(arg_table):
    assert isinstance(arg_table, str), type(arg_table)
    needle = arg_table
    if needle.startswith('new__'):
        needle = arg_table[len('new__'):]
    db.connection.ensure_connection()
    valid_table_name = (
        needle in new_tables and
        arg_table in db_tables(include_views=False)
    )
    if not valid_table_name:
        raise ValueError(_('Invalid table name'))
    return str(arg_table)

def _mk_wrapper():
    return TextIOWrapper(
        BytesIO(),
        line_buffering=True,
        write_through=True,
    )

def check_migration_status(migration_str, /, *, needle=None):
    if needle is None:
        needle = 'No planned migration operations.'
    wrap_stderr, wrap_stdout = _mk_wrapper(), _mk_wrapper()
    try:
        call_command(
            'migrate', '-v', '3', '--plan', 'sync',
            migration_str,
            stderr=wrap_stderr,
            stdout=wrap_stdout,
        )
    except db.migrations.exceptions.NodeNotFoundError:
        return (False, None, None,)
    wrap_stderr.seek(0, 0)
    stderr_lines = wrap_stderr.readlines()
    wrap_stdout.seek(0, 0)
    stdout_lines = wrap_stdout.readlines()
    return (
        bool([ line for line in stdout_lines if needle in line ]),
        stderr_lines,
        stdout_lines,
    )

def db_columns(table_str, /):
    columns = list()
    db_gtd = db.connection.introspection.get_table_description
    with db.connection.cursor() as cursor:
        columns.extend(db_gtd(cursor, table_str))
    return columns


class Command(BaseCommand):

    help = _('Fixes MariaDB database issues')
    output_transaction = True
    requires_migrations_checks = False

    def add_arguments(self, parser):
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            help=_('Only show the SQL; do not apply it to the database'),
        )
        parser.add_argument(
            '--uuid-columns',
            action='store_true',
            default=False,
            help=_('Switch to the native UUID column type'),
        )
        parser.add_argument(
            '--delete-table',
            action='append',
            default=list(),
            metavar='TABLE',
            type=SQLTable,
            help=_('SQL table name to be deleted'),
        )

    def _using_char(self, table_str, column_str='uuid', /):
        cols = db_columns(table_str)
        char_sizes = { 32, 36, }
        char_types = { 'char', 'varchar', }
        return column_str in [
            c.name for c in cols if c.data_type in char_types and c.display_size in char_sizes
        ]

    def _column_type(self, table_str, column_str='uuid', /):
        cols = db_columns(table_str)
        found = [
            f'{c.data_type}({c.display_size})' for c in cols if column_str.lower() == c.name.lower()
        ]
        if not found:
            return str()
        return found[0]

    def handle(self, *args, **options):
        if 'mysql' != db.connection.vendor:
            raise CommandError(
                _('An invalid database vendor is configured')
                + f': {db.connection.vendor}'
            )

        db.connection.ensure_connection()
        db_is_mariadb = (
            hasattr(db.connection, 'mysql_is_mariadb') and
            db.connection.is_usable() and
            db.connection.mysql_is_mariadb
        )
        if not db_is_mariadb:
            raise CommandError(_('Not connected to a MariaDB database server.'))

        display_name = db.connection.display_name
        table_names = options.get('delete_table')
        schema = db.connection.schema_editor(collect_sql=True)
        quote_name = schema.quote_name

        log.info('Start')


        if options['uuid_columns']:
            if 'uuid' != db.connection.data_types.get('UUIDField', ''):
                raise CommandError(_(
                    f'The {display_name} database server does not support UUID columns.'
                ))
            uuid_column_type_str = 'uuid(36)'
            both_tables = (
                self._using_char('sync_source', 'uuid') and
                self._using_char('sync_media', 'uuid')
            )
            if not both_tables:
                if uuid_column_type_str == self._column_type('sync_source', 'uuid').lower():
                    log.info('The source table is already using a native UUID column.')
                elif uuid_column_type_str == self._column_type('sync_media', 'uuid').lower():
                    log.info('The media table is already using a native UUID column.')
                elif uuid_column_type_str == self._column_type('sync_media', 'source_id').lower():
                    log.info('The media table is already using a native UUID column.')
                else:
                    raise CommandError(_(
                        'The database is not in an appropriate state to switch to '
                        'native UUID columns. Manual intervention is required.'
                    ))
            else:
                media_table_str = quote_name('sync_media')
                source_table_str = quote_name('sync_source')
                fk_name_str = quote_name('sync_media_source_id_36827e1d_fk_sync_source_uuid')
                source_id_column_str = quote_name('source_id')
                uuid_column_str = quote_name('uuid')
                uuid_type_str = 'uuid'.upper()
                remove_fk = schema.sql_delete_fk % dict(
                    table=media_table_str,
                    name=fk_name_str,
                )
                add_fk = schema.sql_create_fk % dict(
                    table=media_table_str,
                    name=fk_name_str,
                    column=source_id_column_str,
                    to_table=source_table_str,
                    to_column=uuid_column_str,
                    deferrable='',
                )

                schema.execute(
                    schema.sql_alter_column % dict(
                        table=media_table_str,
                        changes=schema.sql_alter_column_not_null % dict(
                            type=uuid_type_str,
                            column=uuid_column_str,
                        ),
                    ),
                    None,
                )
                schema.execute(remove_fk, None)
                schema.execute(
                    schema.sql_alter_column % dict(
                        table=source_table_str,
                        changes=schema.sql_alter_column_not_null % dict(
                            type=uuid_type_str,
                            column=uuid_column_str,
                        ),
                    ),
                    None,
                )
                schema.execute(
                    schema.sql_alter_column % dict(
                        table=media_table_str,
                        changes=schema.sql_alter_column_not_null % dict(
                            type=uuid_type_str,
                            column=source_id_column_str,
                        ),
                    ),
                    None,
                )
                schema.execute(add_fk, None)


        if table_names:
            # Check that the migration is at an appropriate step
            at_30, err_30, out_30 = check_migration_status( '0030_alter_source_source_vcodec' )
            at_31, err_31, out_31 = check_migration_status( '0031_metadata_metadataformat' )
            at_31s, err_31s, out_31s = check_migration_status( '0031_squashed_metadata_metadataformat' )
            after_31, err_31a, out_31a = check_migration_status(
                '0031_metadata_metadataformat',
                needle='Undo Rename table for metadata to sync_media_metadata',
            )

            should_delete = (
                not (at_31s or after_31) and
                (at_30 or at_31)
            )
            if not should_delete:
                raise CommandError(_(
                    'Deleting metadata tables that are in use is not safe!'
                ))

            for table in table_names:
                schema.execute(
                    schema.sql_delete_table % dict(
                        table=quote_name(table),
                    ),
                    None,
                )

        if options['dry_run']:
            log.info('Done')
            return '\n'.join(schema.collected_sql)
        else:
            with db.connection.schema_editor(collect_sql=False) as schema_editor:
                for sql in schema.collected_sql:
                    schema_editor.execute(sql, None)


        # All done
        log.info('Done')
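The new command can also be driven from Python rather than the CLI; a sketch using Django's `call_command` (same flags as the README example above):

```python
from django.core.management import call_command

# With --dry-run, handle() returns the collected SQL instead of
# executing it, so call_command() hands the statements back.
sql = call_command('fix-mariadb', '--dry-run', '--uuid-columns')
print(sql)
```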
@@ -2,6 +2,7 @@ import os
from pathlib import Path
from django.core.management.base import BaseCommand, CommandError
from common.logger import log
from common.timestamp import timestamp_to_datetime
from sync.choices import FileExtension
from sync.models import Source, Media

@@ -55,11 +56,11 @@ class Command(BaseCommand):
                item.downloaded = True
                item.downloaded_filesize = Path(filepath).stat().st_size
                # set a reasonable download date
                date = item.metadata_published(Path(filepath).stat().st_mtime)
                date = timestamp_to_datetime(Path(filepath).stat().st_mtime)
                if item.published and item.published > date:
                    date = item.published
                if item.has_metadata:
                    metadata_date = item.metadata_published(item.get_metadata_first_value('epoch', 0))
                    metadata_date = timestamp_to_datetime(item.get_metadata_first_value('epoch', 0))
                    if metadata_date and metadata_date > date:
                        date = metadata_date
                if item.download_date and item.download_date > date:
@@ -1,5 +1,6 @@
from django.core.management.base import BaseCommand
from sync.models import Media
from common.utils import django_queryset_generator as qs_gen
from sync.models import Media, Metadata


from common.logger import log
@@ -10,10 +11,10 @@ class Command(BaseCommand):
    help = 'Resets all media item metadata'

    def handle(self, *args, **options):
        log.info('Resettings all media metadata...')
        log.info('Resetting all media metadata...')
        # Delete all metadata
        Media.objects.update(metadata=None)
        Metadata.objects.all().delete()
        # Trigger the save signal on each media item
        for item in Media.objects.all():
            item.save()
        for media in qs_gen(Media.objects.filter(metadata__isnull=False)):
            media.metadata_clear(save=True)
        log.info('Done')
@@ -3,7 +3,7 @@ from django.db.transaction import atomic
from django.utils.translation import gettext_lazy as _
from background_task.models import Task
from sync.models import Source
from sync.tasks import index_source_task
from sync.tasks import index_source_task, check_source_directory_exists


from common.logger import log
@@ -121,17 +121,20 @@ def get_best_video_format(media):
        return False, False
    video_formats = multi_key_sort(video_formats, sort_keys, True)
    source_resolution = media.source.source_resolution.strip().upper()
    source_resolution_height = media.source.source_resolution_height
    source_vcodec = media.source.source_vcodec
    exact_match, best_match = None, None
    for fmt in video_formats:
        # format_note was blank, match height instead
        if '' == fmt['format'] and fmt['height'] == media.source.source_resolution_height:
            fmt['format'] = source_resolution
    def matched_resolution(fmt):
        if fmt['format'] == source_resolution:
            return True
        elif fmt['height'] == source_resolution_height:
            return True
        return False
    # Of our filtered video formats, check for resolution + codec + hdr + fps match
    if media.source.prefer_60fps and media.source.prefer_hdr:
        for fmt in video_formats:
            # Check for an exact match
            if (source_resolution == fmt['format'] and
            if (matched_resolution(fmt) and
                    source_vcodec == fmt['vcodec'] and
                    fmt['is_hdr'] and
                    fmt['is_60fps']):
@@ -142,7 +145,7 @@ def get_best_video_format(media):
        if not best_match:
            for fmt in video_formats:
                # Check for a resolution, hdr and fps match but drop the codec
                if (source_resolution == fmt['format'] and
                if (matched_resolution(fmt) and
                        fmt['is_hdr'] and fmt['is_60fps']):
                    # Close match
                    exact_match, best_match = False, fmt
@@ -158,7 +161,7 @@ def get_best_video_format(media):
        if not best_match:
            for fmt in video_formats:
                # Check for resolution, codec and 60fps match
                if (source_resolution == fmt['format'] and
                if (matched_resolution(fmt) and
                        source_vcodec == fmt['vcodec'] and
                        fmt['is_60fps']):
                    exact_match, best_match = False, fmt
@@ -166,21 +169,21 @@ def get_best_video_format(media):
        if not best_match:
            for fmt in video_formats:
                # Check for resolution and hdr match
                if (source_resolution == fmt['format'] and
                if (matched_resolution(fmt) and
                        fmt['is_hdr']):
                    exact_match, best_match = False, fmt
                    break
        if not best_match:
            for fmt in video_formats:
                # Check for resolution and 60fps match
                if (source_resolution == fmt['format'] and
                if (matched_resolution(fmt) and
                        fmt['is_60fps']):
                    exact_match, best_match = False, fmt
                    break
        if not best_match:
            for fmt in video_formats:
                # Check for resolution, codec and hdr match
                if (source_resolution == fmt['format'] and
                if (matched_resolution(fmt) and
                        source_vcodec == fmt['vcodec'] and
                        fmt['is_hdr']):
                    exact_match, best_match = False, fmt
@@ -188,14 +191,20 @@ def get_best_video_format(media):
        if not best_match:
            for fmt in video_formats:
                # Check for resolution and codec
                if (source_resolution == fmt['format'] and
                if (matched_resolution(fmt) and
                        source_vcodec == fmt['vcodec']):
                    exact_match, best_match = False, fmt
                    break
        if not best_match:
            for fmt in video_formats:
                # Check for resolution
                if source_resolution == fmt['format']:
                if matched_resolution(fmt):
                    exact_match, best_match = False, fmt
                    break
        if not best_match:
            for fmt in video_formats:
                # Check for codec
                if (source_vcodec == fmt['vcodec']):
                    exact_match, best_match = False, fmt
                    break
        if not best_match:
@@ -205,7 +214,7 @@ def get_best_video_format(media):
    if media.source.prefer_60fps and not media.source.prefer_hdr:
        for fmt in video_formats:
            # Check for an exact match
            if (source_resolution == fmt['format'] and
            if (matched_resolution(fmt) and
                    source_vcodec == fmt['vcodec'] and
                    fmt['is_60fps'] and
                    not fmt['is_hdr']):
@@ -216,7 +225,7 @@ def get_best_video_format(media):
        if not best_match:
            for fmt in video_formats:
                # Check for a resolution and fps match but drop the codec
                if (source_resolution == fmt['format'] and
                if (matched_resolution(fmt) and
                        fmt['is_60fps'] and
                        not fmt['is_hdr']):
                    exact_match, best_match = False, fmt
@@ -239,7 +248,7 @@ def get_best_video_format(media):
        if not best_match:
            for fmt in video_formats:
                # Check for codec and resolution match but drop 60fps
                if (source_resolution == fmt['format'] and
                if (matched_resolution(fmt) and
                        source_vcodec == fmt['vcodec'] and
                        not fmt['is_hdr']):
                    exact_match, best_match = False, fmt
@@ -247,14 +256,20 @@ def get_best_video_format(media):
        if not best_match:
            for fmt in video_formats:
                # Check for codec and resolution match only
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
source_vcodec == fmt['vcodec']):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for resolution
|
||||
if source_resolution == fmt['format']:
|
||||
if matched_resolution(fmt):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for codec
|
||||
if (source_vcodec == fmt['vcodec']):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
@ -264,7 +279,7 @@ def get_best_video_format(media):
|
||||
elif media.source.prefer_hdr and not media.source.prefer_60fps:
|
||||
for fmt in video_formats:
|
||||
# Check for an exact match
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
source_vcodec == fmt['vcodec'] and
|
||||
fmt['is_hdr']):
|
||||
# Exact match
|
||||
@ -274,7 +289,7 @@ def get_best_video_format(media):
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for a resolution and fps match but drop the codec
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
fmt['is_hdr'] and
|
||||
not fmt['is_60fps']):
|
||||
exact_match, best_match = False, fmt
|
||||
@ -297,7 +312,7 @@ def get_best_video_format(media):
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for codec and resolution match but drop hdr
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
source_vcodec == fmt['vcodec'] and
|
||||
not fmt['is_60fps']):
|
||||
exact_match, best_match = False, fmt
|
||||
@ -305,14 +320,20 @@ def get_best_video_format(media):
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for codec and resolution match only
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
source_vcodec == fmt['vcodec']):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for resolution
|
||||
if source_resolution == fmt['format']:
|
||||
if matched_resolution(fmt):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for codec
|
||||
if (source_vcodec == fmt['vcodec']):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
@ -322,7 +343,7 @@ def get_best_video_format(media):
|
||||
elif not media.source.prefer_hdr and not media.source.prefer_60fps:
|
||||
for fmt in video_formats:
|
||||
# Check for an exact match
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
source_vcodec == fmt['vcodec'] and
|
||||
not fmt['is_60fps'] and
|
||||
not fmt['is_hdr']):
|
||||
@ -333,7 +354,7 @@ def get_best_video_format(media):
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for a resolution, hdr and fps match but drop the codec
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
not fmt['is_hdr'] and not fmt['is_60fps']):
|
||||
# Close match
|
||||
exact_match, best_match = False, fmt
|
||||
@ -349,7 +370,7 @@ def get_best_video_format(media):
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for resolution, codec and hdr match
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
source_vcodec == fmt['vcodec'] and
|
||||
not fmt['is_hdr']):
|
||||
exact_match, best_match = False, fmt
|
||||
@ -357,7 +378,7 @@ def get_best_video_format(media):
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for resolution, codec and 60fps match
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
source_vcodec == fmt['vcodec'] and
|
||||
not fmt['is_60fps']):
|
||||
exact_match, best_match = False, fmt
|
||||
@ -365,21 +386,27 @@ def get_best_video_format(media):
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for resolution and codec
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
source_vcodec == fmt['vcodec']):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for resolution and not hdr
|
||||
if (source_resolution == fmt['format'] and
|
||||
if (matched_resolution(fmt) and
|
||||
not fmt['is_hdr']):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for resolution
|
||||
if source_resolution == fmt['format']:
|
||||
if matched_resolution(fmt):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
for fmt in video_formats:
|
||||
# Check for codec
|
||||
if (source_vcodec == fmt['vcodec']):
|
||||
exact_match, best_match = False, fmt
|
||||
break
|
||||
if not best_match:
|
||||
|
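Every fallback block in get_best_video_format above repeats the same scan-and-break shape with a different predicate, now routed through matched_resolution(). Purely as an illustration of that pattern (this is not how the commit implements it), the ladder could be expressed data-driven:

def first_match(video_formats, predicates):
    # Try each predicate in priority order and return the first
    # format satisfying one, or None when nothing matches at all.
    for predicate in predicates:
        for fmt in video_formats:
            if predicate(fmt):
                return fmt
    return None

best_match = first_match(video_formats, [
    lambda f: matched_resolution(f) and source_vcodec == f['vcodec'] and f['is_60fps'],
    lambda f: matched_resolution(f) and f['is_60fps'],
    lambda f: matched_resolution(f),
    lambda f: source_vcodec == f['vcodec'],
])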
@ -5,6 +5,7 @@ from django.forms import ValidationError
|
||||
from urllib.parse import urlsplit, urlunsplit, urlencode
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from common.logger import log
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
class MediaServerError(Exception):
|
||||
@ -18,14 +19,52 @@ class MediaServer:
|
||||
|
||||
TIMEOUT = 0
|
||||
HELP = ''
|
||||
default_headers = {'User-Agent': 'TubeSync'}
|
||||
|
||||
def __init__(self, mediaserver_instance):
self.object = mediaserver_instance
self.headers = dict(**self.default_headers)
self.token = None

def make_request_args(self, uri='/', token_header=None, headers={}, token_param=None, params={}):
base_parts = urlsplit(self.object.url)
if self.token is None:
self.token = self.object.options['token'] or None
if token_header and self.token:
headers.update({token_header: self.token})
self.headers.update(headers)
if token_param and self.token:
params.update({token_param: self.token})
qs = urlencode(params)
enable_verify = (
base_parts.scheme.endswith('s') and
self.object.verify_https
)
url = urlunsplit((base_parts.scheme, base_parts.netloc, uri, qs, ''))
return (url, dict(
headers=self.headers,
verify=enable_verify,
timeout=self.TIMEOUT,
))
|
||||
def make_request(self, uri='/', /, *, headers={}, params={}):
'''
A very simple implementation is:
url, kwargs = self.make_request_args(uri=uri, headers=headers, params=params)
return requests.get(url, **kwargs)
'''
raise NotImplementedError('MediaServer.make_request() must be implemented')
|
||||
def validate(self):
|
||||
'''
|
||||
Called to check that the configured media server values are correct.
|
||||
'''
|
||||
raise NotImplementedError('MediaServer.validate() must be implemented')
|
||||
|
||||
def update(self):
|
||||
'''
|
||||
Called after the `Media` instance has saved a downloaded file.
|
||||
'''
|
||||
raise NotImplementedError('MediaServer.update() must be implemented')
|
||||
|
||||
|
||||
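The docstring above spells out the whole subclass contract. A minimal sketch of a concrete server following it (the class name is illustrative and not part of this commit):

import requests

class ExampleMediaServer(MediaServer):
    TIMEOUT = 10

    def make_request(self, uri='/', /, *, headers={}, params={}):
        # Build the URL plus headers/verify/timeout kwargs from the
        # shared helper, then issue a plain GET.
        url, kwargs = self.make_request_args(uri=uri, headers=headers, params=params)
        return requests.get(url, **kwargs)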
@ -48,30 +87,22 @@ class PlexMediaServer(MediaServer):
|
||||
'<a href="https://www.plexopedia.com/plex-media-server/api/server/libraries/" '
|
||||
'target="_blank">here</a></p>.')
|
||||
|
||||
def make_request(self, uri='/', params={}):
|
||||
headers = {'User-Agent': 'TubeSync'}
|
||||
token = self.object.loaded_options['token']
|
||||
params['X-Plex-Token'] = token
|
||||
base_parts = urlsplit(self.object.url)
|
||||
qs = urlencode(params)
|
||||
url = urlunsplit((base_parts.scheme, base_parts.netloc, uri, qs, ''))
|
||||
if self.object.verify_https:
|
||||
log.debug(f'[plex media server] Making HTTP GET request to: {url}')
|
||||
return requests.get(url, headers=headers, verify=True,
|
||||
timeout=self.TIMEOUT)
|
||||
else:
|
||||
def make_request(self, uri='/', /, *, headers={}, params={}):
url, kwargs = self.make_request_args(uri=uri, headers=headers, token_param='X-Plex-Token', params=params)
log.debug(f'[plex media server] Making HTTP GET request to: {url}')
if self.object.use_https and not kwargs['verify']:
# If not validating SSL, given that this is likely going to be for an
# internal or private network, that Plex issues certs for *.hash.plex.direct
# and that the warning won't ever be sensibly seen in the HTTPS logs, hide it
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return requests.get(url, headers=headers, verify=False,
timeout=self.TIMEOUT)
return requests.get(url, **kwargs)
return requests.get(url, **kwargs)
|
||||
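The warning being silenced here is the InsecureRequestWarning that requests/urllib3 emit for verify=False. The same suppression pattern works standalone (hostname below is hypothetical):

import warnings
import requests

with warnings.catch_warnings():
    # Ignore the InsecureRequestWarning for this request only
    warnings.simplefilter("ignore")
    response = requests.get('https://plex.internal.example:32400/',
                            verify=False, timeout=10)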
def validate(self):
|
||||
'''
|
||||
A Plex server requires a host, port, access token and a comma-separated
|
||||
list if library IDs.
|
||||
list of library IDs.
|
||||
'''
|
||||
# Check all the required values are present
|
||||
if not self.object.host:
|
||||
@ -85,7 +116,7 @@ class PlexMediaServer(MediaServer):
|
||||
if port < 1 or port > 65535:
|
||||
raise ValidationError('Plex Media Server "port" must be between 1 '
|
||||
'and 65535')
|
||||
options = self.object.loaded_options
|
||||
options = self.object.options
|
||||
if 'token' not in options:
|
||||
raise ValidationError('Plex Media Server requires a "token"')
|
||||
token = options['token'].strip()
|
||||
@ -152,7 +183,7 @@ class PlexMediaServer(MediaServer):
|
||||
|
||||
def update(self):
|
||||
# For each section / library ID pop off a request to refresh it
|
||||
libraries = self.object.loaded_options.get('libraries', '')
|
||||
libraries = self.object.options.get('libraries', '')
|
||||
for library_id in libraries.split(','):
|
||||
library_id = library_id.strip()
|
||||
uri = f'/library/sections/{library_id}/refresh'
|
||||
@ -172,19 +203,47 @@ class JellyfinMediaServer(MediaServer):
|
||||
HELP = _('<p>To connect your TubeSync server to your Jellyfin Media Server, please enter the details below.</p>'
'<p>The <strong>host</strong> can be either an IP address or a valid hostname.</p>'
'<p>The <strong>port</strong> should be between 1 and 65535.</p>'
'<p>The <strong>token</strong> is required for API access. You can generate a token in your Jellyfin user profile settings.</p>'
'<p>The <strong>libraries</strong> is a comma-separated list of library IDs in Jellyfin.</p>')
'<p>The "API Key" <strong>token</strong> is required for API access. Your Jellyfin administrator can generate an "API Key" token for use with TubeSync for you.</p>'
'<p>The <strong>libraries</strong> is a comma-separated list of library IDs in Jellyfin. Leave this blank to see a list.</p>')
|
||||
def make_request(self, uri='/', params={}):
|
||||
headers = {
|
||||
'User-Agent': 'TubeSync',
|
||||
'X-Emby-Token': self.object.loaded_options['token'] # Jellyfin uses the same `X-Emby-Token` header as Emby
|
||||
}
|
||||
def make_request(self, uri='/', /, *, headers={}, params={}, data={}, json=None, method='GET'):
|
||||
assert method in {'GET', 'POST'}, f'Unimplemented method: {method}'
|
||||
|
||||
headers.update({'Content-Type': 'application/json'})
|
||||
url, kwargs = self.make_request_args(uri=uri, token_header='X-Emby-Token', headers=headers, params=params)
|
||||
# From the Emby source code,
# this is the order in which the headers are tried:
# X-Emby-Authorization: ('MediaBrowser'|'Emby') 'Token'=<token_value>, 'Client'=<client_value>, 'Version'=<version_value>
# X-Emby-Token: <token_value>
# X-MediaBrowser-Token: <token_value>
# Jellyfin uses 'Authorization' first,
# then optionally falls back to the 'X-Emby-Authorization' header.
# Jellyfin uses (") around values, but not keys, in that header.
||||
token = kwargs['headers'].get('X-Emby-Token', None)
|
||||
if token:
|
||||
kwargs['headers'].update({
|
||||
'X-MediaBrowser-Token': token,
|
||||
'X-Emby-Authorization': f'Emby Token={token}, Client=TubeSync, Version={settings.VERSION}',
|
||||
'Authorization': f'MediaBrowser Token="{token}", Client="TubeSync", Version="{settings.VERSION}"',
|
||||
})
|
||||
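For illustration, with a hypothetical token abc123 and a settings.VERSION of 1.0, the update above yields:

{
    'X-MediaBrowser-Token': 'abc123',
    'X-Emby-Authorization': 'Emby Token=abc123, Client=TubeSync, Version=1.0',
    'Authorization': 'MediaBrowser Token="abc123", Client="TubeSync", Version="1.0"',
}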
|
||||
url = f'{self.object.url}{uri}'
|
||||
log.debug(f'[jellyfin media server] Making HTTP GET request to: {url}')
|
||||
|
||||
return requests.get(url, headers=headers, verify=self.object.verify_https, timeout=self.TIMEOUT)
|
||||
log.debug(f'[jellyfin media server] Making HTTP {method} request to: {url}')
|
||||
if self.object.use_https and not kwargs['verify']:
|
||||
# not verifying certificates
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore")
|
||||
return requests.request(
|
||||
method, url,
|
||||
data=data,
|
||||
json=json,
|
||||
**kwargs,
|
||||
)
|
||||
return requests.request(
|
||||
method, url,
|
||||
data=data,
|
||||
json=json,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
def validate(self):
|
||||
if not self.object.host:
|
||||
@ -199,7 +258,7 @@ class JellyfinMediaServer(MediaServer):
|
||||
except (TypeError, ValueError):
|
||||
raise ValidationError('Jellyfin Media Server "port" must be an integer')
|
||||
|
||||
options = self.object.loaded_options
|
||||
options = self.object.options
|
||||
if 'token' not in options:
|
||||
raise ValidationError('Jellyfin Media Server requires a "token"')
|
||||
if 'libraries' not in options:
|
||||
@ -243,10 +302,10 @@ class JellyfinMediaServer(MediaServer):
|
||||
return True
|
||||
|
||||
def update(self):
|
||||
libraries = self.object.loaded_options.get('libraries', '').split(',')
|
||||
libraries = self.object.options.get('libraries', '').split(',')
|
||||
for library_id in map(str.strip, libraries):
|
||||
uri = f'/Library/{library_id}/Refresh'
|
||||
response = self.make_request(uri)
|
||||
uri = f'/Items/{library_id}/Refresh'
|
||||
response = self.make_request(uri, method='POST')
|
||||
if response.status_code != 204: # 204 No Content is expected for successful refresh
|
||||
raise MediaServerError(f'Failed to refresh Jellyfin library "{library_id}", status code: {response.status_code}')
|
||||
return True
|
||||
|
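The new refresh call POSTs to /Items/{library_id}/Refresh and treats anything other than 204 as a failure. An equivalent manual check against your own server (host, port and token below are placeholders):

import requests

response = requests.post(
    'http://<jellyfin-host>:8096/Items/<library_id>/Refresh',
    headers={'X-Emby-Token': '<your-api-key>'},
    timeout=10,
)
assert response.status_code == 204  # No Content is expected on success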
@ -0,0 +1,408 @@
|
||||
# Manually adjusted based on the generated file.
|
||||
# Generated by Django 5.1.8 on 2025-04-10 15:29
|
||||
|
||||
import django.core.files.storage
|
||||
import django.core.validators
|
||||
import django.db.models.deletion
|
||||
import sync.fields
|
||||
import sync.models
|
||||
import uuid
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
# Functions from the following migrations need manual copying.
|
||||
# Move them and any dependencies into this file, then update the
|
||||
# RunPython operations to refer to the local versions:
|
||||
# sync.migrations.0013_fix_elative_media_file
|
||||
from django.conf import settings
|
||||
from pathlib import Path
|
||||
|
||||
def fix_media_file(apps, schema_editor):
|
||||
Media = apps.get_model('sync', 'Media')
|
||||
download_dir = str(settings.DOWNLOAD_ROOT)
|
||||
download_dir_path = Path(download_dir)
|
||||
for media in Media.objects.filter(downloaded=True):
|
||||
if media.media_file.path.startswith(download_dir):
|
||||
media_path = Path(media.media_file.path)
|
||||
relative_path = media_path.relative_to(download_dir_path)
|
||||
media.media_file.name = str(relative_path)
|
||||
media.save()
|
||||
|
||||
# Function above has been copied/modified and RunPython operations adjusted.
|
||||
|
||||
def media_file_location():
|
||||
return str(settings.DOWNLOAD_ROOT)
|
||||
|
||||
# Used the above function for storage location.
|
||||
|
||||
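Wrapping settings.DOWNLOAD_ROOT in media_file_location() means the squashed migration carries no hard-coded absolute path: the storage location is read from settings whenever the migration module is imported.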
class Migration(migrations.Migration):
|
||||
|
||||
replaces = [
|
||||
# ('sync', '0001_initial_squashed_0010_auto_20210924_0554'),
|
||||
('sync', '0001_initial'),
|
||||
('sync', '0002_auto_20201213_0817'),
|
||||
('sync', '0003_source_copy_thumbnails'),
|
||||
('sync', '0004_source_media_format'),
|
||||
('sync', '0005_auto_20201219_0312'),
|
||||
('sync', '0006_source_write_nfo'),
|
||||
('sync', '0007_auto_20201219_0645'),
|
||||
('sync', '0008_source_download_cap'),
|
||||
('sync', '0009_auto_20210218_0442'),
|
||||
('sync', '0010_auto_20210924_0554'),
|
||||
# ('sync', '0011_auto_20220201_1654_squashed_0020_auto_20231024_1825'),
|
||||
('sync', '0011_auto_20220201_1654'),
|
||||
('sync', '0012_alter_media_downloaded_format'),
|
||||
('sync', '0013_fix_elative_media_file'),
|
||||
('sync', '0014_alter_media_media_file'),
|
||||
('sync', '0015_auto_20230213_0603'),
|
||||
('sync', '0016_auto_20230214_2052'),
|
||||
('sync', '0017_alter_source_sponsorblock_categories'),
|
||||
('sync', '0018_source_subtitles'),
|
||||
('sync', '0019_add_delete_removed_media'),
|
||||
('sync', '0020_auto_20231024_1825'),
|
||||
('sync', '0021_source_copy_channel_images'),
|
||||
('sync', '0022_add_delete_files_on_disk'),
|
||||
('sync', '0023_media_duration_filter'),
|
||||
('sync', '0024_auto_20240717_1535'),
|
||||
('sync', '0025_add_video_type_support'),
|
||||
('sync', '0026_alter_source_sub_langs'),
|
||||
('sync', '0027_alter_source_sponsorblock_categories'),
|
||||
('sync', '0028_alter_source_source_resolution'),
|
||||
('sync', '0029_alter_mediaserver_fields'),
|
||||
('sync', '0030_alter_source_source_vcodec'),
|
||||
]
|
||||
|
||||
initial = True
|
||||
|
||||
dependencies = [
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='Source',
|
||||
fields=[
|
||||
('uuid', models.UUIDField(
|
||||
default=uuid.uuid4, editable=False, help_text='UUID of the source', primary_key=True, serialize=False, verbose_name='uuid',
|
||||
)),
|
||||
('created', models.DateTimeField(
|
||||
auto_now_add=True, db_index=True, help_text='Date and time the source was created', verbose_name='created',
|
||||
)),
|
||||
('last_crawl', models.DateTimeField(
|
||||
blank=True, db_index=True, help_text='Date and time the source was last crawled', null=True, verbose_name='last crawl',
|
||||
)),
|
||||
('source_type', models.CharField(
|
||||
choices=[('c', 'YouTube channel'), ('i', 'YouTube channel by ID'), ('p', 'YouTube playlist')], db_index=True, default='c', help_text='Source type', max_length=1, verbose_name='source type',
|
||||
)),
|
||||
('key', models.CharField(
|
||||
db_index=True, help_text='Source key, such as exact YouTube channel name or playlist ID', max_length=100, unique=True, verbose_name='key',
|
||||
)),
|
||||
('name', models.CharField(
|
||||
db_index=True, help_text='Friendly name for the source, used locally in TubeSync only', max_length=100, unique=True, verbose_name='name',
|
||||
)),
|
||||
('directory', models.CharField(
|
||||
db_index=True, help_text='Directory name to save the media into', max_length=100, unique=True, verbose_name='directory',
|
||||
)),
|
||||
('index_schedule', models.IntegerField(
|
||||
choices=[(3600, 'Every hour'), (7200, 'Every 2 hours'), (10800, 'Every 3 hours'), (14400, 'Every 4 hours'), (18000, 'Every 5 hours'), (21600, 'Every 6 hours'), (43200, 'Every 12 hours'), (86400, 'Every 24 hours'), (259200, 'Every 3 days'), (604800, 'Every 7 days'), (0, 'Never')], db_index=True, default=86400, help_text='Schedule of how often to index the source for new media', verbose_name='index schedule',
|
||||
)),
|
||||
('delete_old_media', models.BooleanField(
|
||||
default=False, help_text='Delete old media after "days to keep" days?', verbose_name='delete old media',
|
||||
)),
|
||||
('days_to_keep', models.PositiveSmallIntegerField(
|
||||
default=14, help_text='If "delete old media" is ticked, the number of days after which to automatically delete media', verbose_name='days to keep',
|
||||
)),
|
||||
('source_resolution', models.CharField(
|
||||
choices=[('360p', '360p (SD)'), ('480p', '480p (SD)'), ('720p', '720p (HD)'), ('1080p', '1080p (Full HD)'), ('1440p', '1440p (2K)'), ('2160p', '2160p (4K)'), ('4320p', '4320p (8K)'), ('audio', 'Audio only')], db_index=True, default='1080p', help_text='Source resolution, desired video resolution to download', max_length=8, verbose_name='source resolution',
|
||||
)),
|
||||
('source_vcodec', models.CharField(
|
||||
choices=[('AVC1', 'AVC1 (H.264)'), ('VP9', 'VP9')], db_index=True, default='VP9', help_text='Source video codec, desired video encoding format to download (ignored if "resolution" is audio only)', max_length=8, verbose_name='source video codec',
|
||||
)),
|
||||
('source_acodec', models.CharField(
|
||||
choices=[('MP4A', 'MP4A'), ('OPUS', 'OPUS')], db_index=True, default='OPUS', help_text='Source audio codec, desired audio encoding format to download', max_length=8, verbose_name='source audio codec',
|
||||
)),
|
||||
('prefer_60fps', models.BooleanField(
|
||||
default=True, help_text='Where possible, prefer 60fps media for this source', verbose_name='prefer 60fps',
|
||||
)),
|
||||
('prefer_hdr', models.BooleanField(
|
||||
default=False, help_text='Where possible, prefer HDR media for this source', verbose_name='prefer hdr',
|
||||
)),
|
||||
('fallback', models.CharField(
|
||||
choices=[('f', 'Fail, do not download any media'), ('n', 'Get next best resolution or codec instead'), ('h', 'Get next best resolution but at least HD')], db_index=True, default='h', help_text='What to do when media in your source resolution and codecs is not available', max_length=1, verbose_name='fallback',
|
||||
)),
|
||||
('has_failed', models.BooleanField(
|
||||
default=False, help_text='Source has failed to index media', verbose_name='has failed',
|
||||
)),
|
||||
('copy_thumbnails', models.BooleanField(
|
||||
default=False, help_text='Copy thumbnails with the media, these may be detected and used by some media servers', verbose_name='copy thumbnails',
|
||||
)),
|
||||
('media_format', models.CharField(
|
||||
default='{yyyy_mm_dd}_{source}_{title}_{key}_{format}.{ext}', help_text='File format to use for saving files, detailed options at bottom of page.', max_length=200, verbose_name='media format',
|
||||
)),
|
||||
('write_nfo', models.BooleanField(
|
||||
default=False, help_text='Write an NFO file in XML with the media info, these may be detected and used by some media servers', verbose_name='write nfo',
|
||||
)),
|
||||
('download_cap', models.IntegerField(
|
||||
choices=[(0, 'No cap'), (604800, '1 week (7 days)'), (2592000, '1 month (30 days)'), (7776000, '3 months (90 days)'), (15552000, '6 months (180 days)'), (31536000, '1 year (365 days)'), (63072000, '2 years (730 days)'), (94608000, '3 years (1095 days)'), (157680000, '5 years (1825 days)'), (315360000, '10 years (3650 days)')], default=0, help_text='Do not download media older than this capped date', verbose_name='download cap',
|
||||
)),
|
||||
('download_media', models.BooleanField(
|
||||
default=True, help_text='Download media from this source, if not selected the source will only be indexed', verbose_name='download media',
|
||||
)),
|
||||
('write_json', models.BooleanField(
|
||||
default=False, help_text='Write a JSON file with the media info, these may be detected and used by some media servers', verbose_name='write json',
|
||||
)),
|
||||
],
|
||||
options={
|
||||
'verbose_name': 'Source',
|
||||
'verbose_name_plural': 'Sources',
|
||||
},
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='MediaServer',
|
||||
fields=[
|
||||
('id', models.AutoField(
|
||||
auto_created=True, primary_key=True, serialize=False, verbose_name='ID',
|
||||
)),
|
||||
('server_type', models.CharField(
|
||||
choices=[('p', 'Plex')], db_index=True, default='p', help_text='Server type', max_length=1, verbose_name='server type',
|
||||
)),
|
||||
('host', models.CharField(
|
||||
db_index=True, help_text='Hostname or IP address of the media server', max_length=200, verbose_name='host',
|
||||
)),
|
||||
('port', models.PositiveIntegerField(
|
||||
db_index=True, help_text='Port number of the media server', verbose_name='port',
|
||||
)),
|
||||
('use_https', models.BooleanField(
|
||||
default=True, help_text='Connect to the media server over HTTPS', verbose_name='use https',
|
||||
)),
|
||||
('verify_https', models.BooleanField(
|
||||
default=False, help_text='If connecting over HTTPS, verify the SSL certificate is valid', verbose_name='verify https',
|
||||
)),
|
||||
('options', models.TextField(
|
||||
blank=True, help_text='JSON encoded options for the media server', null=True, verbose_name='options',
|
||||
)),
|
||||
],
|
||||
options={
|
||||
'verbose_name': 'Media Server',
|
||||
'verbose_name_plural': 'Media Servers',
|
||||
'unique_together': {('host', 'port')},
|
||||
},
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='Media',
|
||||
fields=[
|
||||
('uuid', models.UUIDField(
|
||||
default=uuid.uuid4, editable=False, help_text='UUID of the media', primary_key=True, serialize=False, verbose_name='uuid',
|
||||
)),
|
||||
('created', models.DateTimeField(
|
||||
auto_now_add=True, db_index=True, help_text='Date and time the media was created', verbose_name='created',
|
||||
)),
|
||||
('published', models.DateTimeField(
|
||||
blank=True, db_index=True, help_text='Date and time the media was published on the source', null=True, verbose_name='published',
|
||||
)),
|
||||
('key', models.CharField(
|
||||
db_index=True, help_text='Media key, such as exact YouTube video ID', max_length=100, verbose_name='key',
|
||||
)),
|
||||
('thumb', models.ImageField(
|
||||
blank=True, height_field='thumb_height', help_text='Thumbnail', max_length=200, null=True, upload_to=sync.models.get_media_thumb_path, verbose_name='thumb', width_field='thumb_width',
|
||||
)),
|
||||
('thumb_width', models.PositiveSmallIntegerField(
|
||||
blank=True, help_text='Width (X) of the thumbnail', null=True, verbose_name='thumb width',
|
||||
)),
|
||||
('thumb_height', models.PositiveSmallIntegerField(
|
||||
blank=True, help_text='Height (Y) of the thumbnail', null=True, verbose_name='thumb height',
|
||||
)),
|
||||
('metadata', models.TextField(
|
||||
blank=True, help_text='JSON encoded metadata for the media', null=True, verbose_name='metadata',
|
||||
)),
|
||||
('can_download', models.BooleanField(
|
||||
db_index=True, default=False, help_text='Media has a matching format and can be downloaded', verbose_name='can download',
|
||||
)),
|
||||
('media_file', models.FileField(
|
||||
blank=True, help_text='Media file', max_length=255, null=True, storage=django.core.files.storage.FileSystemStorage(location=media_file_location()), upload_to=sync.models.get_media_file_path, verbose_name='media file',
|
||||
)),
|
||||
('skip', models.BooleanField(
|
||||
db_index=True, default=False, help_text='Media will be skipped and not downloaded', verbose_name='skip',
|
||||
)),
|
||||
('downloaded', models.BooleanField(
|
||||
db_index=True, default=False, help_text='Media has been downloaded', verbose_name='downloaded',
|
||||
)),
|
||||
('download_date', models.DateTimeField(
|
||||
blank=True, db_index=True, help_text='Date and time the download completed', null=True, verbose_name='download date',
|
||||
)),
|
||||
('downloaded_format', models.CharField(
|
||||
blank=True, help_text='Video format (resolution) of the downloaded media', max_length=30, null=True, verbose_name='downloaded format',
|
||||
)),
|
||||
('downloaded_height', models.PositiveIntegerField(
|
||||
blank=True, help_text='Height in pixels of the downloaded media', null=True, verbose_name='downloaded height',
|
||||
)),
|
||||
('downloaded_width', models.PositiveIntegerField(
|
||||
blank=True, help_text='Width in pixels of the downloaded media', null=True, verbose_name='downloaded width',
|
||||
)),
|
||||
('downloaded_audio_codec', models.CharField(
|
||||
blank=True, help_text='Audio codec of the downloaded media', max_length=30, null=True, verbose_name='downloaded audio codec',
|
||||
)),
|
||||
('downloaded_video_codec', models.CharField(
|
||||
blank=True, help_text='Video codec of the downloaded media', max_length=30, null=True, verbose_name='downloaded video codec',
|
||||
)),
|
||||
('downloaded_container', models.CharField(
|
||||
blank=True, help_text='Container format of the downloaded media', max_length=30, null=True, verbose_name='downloaded container format',
|
||||
)),
|
||||
('downloaded_fps', models.PositiveSmallIntegerField(
|
||||
blank=True, help_text='FPS of the downloaded media', null=True, verbose_name='downloaded fps',
|
||||
)),
|
||||
('downloaded_hdr', models.BooleanField(
|
||||
default=False, help_text='Downloaded media has HDR', verbose_name='downloaded hdr',
|
||||
)),
|
||||
('downloaded_filesize', models.PositiveBigIntegerField(
|
||||
blank=True, db_index=True, help_text='Size of the downloaded media in bytes', null=True, verbose_name='downloaded filesize',
|
||||
)),
|
||||
('source', models.ForeignKey(
|
||||
help_text='Source the media belongs to', on_delete=django.db.models.deletion.CASCADE, related_name='media_source', to='sync.source',
|
||||
)),
|
||||
],
|
||||
options={
|
||||
'verbose_name': 'Media',
|
||||
'verbose_name_plural': 'Media',
|
||||
'unique_together': {('source', 'key')},
|
||||
},
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='media',
|
||||
name='media_file',
|
||||
field=models.FileField(blank=True, help_text='Media file', max_length=255, null=True, storage=django.core.files.storage.FileSystemStorage(base_url='/media-data/', location=media_file_location()), upload_to=sync.models.get_media_file_path, verbose_name='media file'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='media',
|
||||
name='skip',
|
||||
field=models.BooleanField(db_index=True, default=False, help_text='INTERNAL FLAG - Media will be skipped and not downloaded', verbose_name='skip'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='embed_metadata',
|
||||
field=models.BooleanField(default=False, help_text='Embed metadata from source into file', verbose_name='embed metadata'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='embed_thumbnail',
|
||||
field=models.BooleanField(default=False, help_text='Embed thumbnail into the file', verbose_name='embed thumbnail'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='enable_sponsorblock',
|
||||
field=models.BooleanField(default=True, help_text='Use SponsorBlock?', verbose_name='enable sponsorblock'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='write_subtitles',
|
||||
field=models.BooleanField(default=False, help_text='Download video subtitles', verbose_name='write subtitles'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='delete_removed_media',
|
||||
field=models.BooleanField(default=False, help_text='Delete media that is no longer on this playlist', verbose_name='delete removed media'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='auto_subtitles',
|
||||
field=models.BooleanField(default=False, help_text='Accept auto-generated subtitles', verbose_name='accept auto-generated subs'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='copy_channel_images',
|
||||
field=models.BooleanField(default=False, help_text='Copy channel banner and avatar. These may be detected and used by some media servers', verbose_name='copy channel images'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='delete_files_on_disk',
|
||||
field=models.BooleanField(default=False, help_text='Delete files on disk when they are removed from TubeSync', verbose_name='delete files on disk'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='media',
|
||||
name='duration',
|
||||
field=models.PositiveIntegerField(blank=True, help_text='Duration of media in seconds', null=True, verbose_name='duration'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='filter_seconds',
|
||||
field=models.PositiveIntegerField(blank=True, help_text='Filter Media based on Min/Max duration. Leave blank or 0 to disable filtering', null=True, verbose_name='filter seconds'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='filter_seconds_min',
|
||||
field=models.BooleanField(choices=[(True, 'Minimum Length'), (False, 'Maximum Length')], default=True, help_text='When Filter Seconds is > 0, do we skip on minimum (video shorter than limit) or maximum (video greater than maximum) video duration', verbose_name='filter seconds min/max'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='filter_text_invert',
|
||||
field=models.BooleanField(default=False, help_text='Invert filter string regex match, skip any matching titles when selected', verbose_name='invert filter text matching'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='media',
|
||||
name='manual_skip',
|
||||
field=models.BooleanField(db_index=True, default=False, help_text='Media marked as "skipped", won\'t be downloaded', verbose_name='manual_skip'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='media',
|
||||
name='title',
|
||||
field=models.CharField(blank=True, default='', help_text='Video title', max_length=200, verbose_name='title'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='filter_text',
|
||||
field=models.CharField(blank=True, default='', help_text='Regex compatible filter string for video titles', max_length=200, verbose_name='filter string'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='index_videos',
|
||||
field=models.BooleanField(default=True, help_text='Index video media from this source', verbose_name='index videos'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='index_streams',
|
||||
field=models.BooleanField(default=False, help_text='Index live stream media from this source', verbose_name='index streams'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='sub_langs',
|
||||
field=models.CharField(default='en', help_text='List of subtitles langs to download, comma-separated. Example: en,fr or all,-fr,-live_chat', max_length=30, validators=[django.core.validators.RegexValidator(message='Subtitle langs must be a comma-separated list of langs. example: en,fr or all,-fr,-live_chat', regex='^(\\-?[\\_\\.a-zA-Z-]+(,|$))+')], verbose_name='subs langs'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='sponsorblock_categories',
|
||||
field=sync.fields.CommaSepChoiceField(all_choice='all', all_label='(All Categories)', allow_all=True, default='all', help_text='Select the SponsorBlock categories that you wish to be removed from downloaded videos.', max_length=128, possible_choices=[('sponsor', 'Sponsor'), ('intro', 'Intermission/Intro Animation'), ('outro', 'Endcards/Credits'), ('selfpromo', 'Unpaid/Self Promotion'), ('preview', 'Preview/Recap'), ('filler', 'Filler Tangent'), ('interaction', 'Interaction Reminder'), ('music_offtopic', 'Non-Music Section')], verbose_name=''),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='source',
|
||||
name='source_resolution',
|
||||
field=models.CharField(choices=[('audio', 'Audio only'), ('360p', '360p (SD)'), ('480p', '480p (SD)'), ('720p', '720p (HD)'), ('1080p', '1080p (Full HD)'), ('1440p', '1440p (2K)'), ('2160p', '2160p (4K)'), ('4320p', '4320p (8K)')], db_index=True, default='1080p', help_text='Source resolution, desired video resolution to download', max_length=8, verbose_name='source resolution'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='mediaserver',
|
||||
name='options',
|
||||
field=models.TextField(help_text='JSON encoded options for the media server', null=True, verbose_name='options'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='mediaserver',
|
||||
name='server_type',
|
||||
field=models.CharField(choices=[('j', 'Jellyfin'), ('p', 'Plex')], db_index=True, default='p', help_text='Server type', max_length=1, verbose_name='server type'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='mediaserver',
|
||||
name='use_https',
|
||||
field=models.BooleanField(default=False, help_text='Connect to the media server over HTTPS', verbose_name='use https'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='mediaserver',
|
||||
name='verify_https',
|
||||
field=models.BooleanField(default=True, help_text='If connecting over HTTPS, verify the SSL certificate is valid', verbose_name='verify https'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='source',
|
||||
name='source_vcodec',
|
||||
field=models.CharField(choices=[('AVC1', 'AVC1 (H.264)'), ('VP9', 'VP9'), ('AV1', 'AV1')], db_index=True, default='VP9', help_text='Source video codec, desired video encoding format to download (ignored if "resolution" is audio only)', max_length=8, verbose_name='source video codec'),
|
||||
),
|
||||
migrations.RunPython(
|
||||
code=fix_media_file,
|
||||
reverse_code=migrations.RunPython.noop,
|
||||
),
|
||||
]
|
@ -1,7 +1,7 @@
|
||||
# Generated by Django 3.2.18 on 2023-02-14 20:52
|
||||
|
||||
from django.db import migrations, models
|
||||
import sync.models
|
||||
import sync.fields
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
@ -29,6 +29,6 @@ class Migration(migrations.Migration):
|
||||
migrations.AddField(
|
||||
model_name='source',
|
||||
name='sponsorblock_categories',
|
||||
field=sync.models.CommaSepChoiceField(default='all', possible_choices=(('all', 'All'), ('sponsor', 'Sponsor'), ('intro', 'Intermission/Intro Animation'), ('outro', 'Endcards/Credits'), ('selfpromo', 'Unpaid/Self Promotion'), ('preview', 'Preview/Recap'), ('filler', 'Filler Tangent'), ('interaction', 'Interaction Reminder'), ('music_offtopic', 'Non-Music Section'))),
|
||||
field=sync.fields.CommaSepChoiceField(default='all', possible_choices=(('all', 'All'), ('sponsor', 'Sponsor'), ('intro', 'Intermission/Intro Animation'), ('outro', 'Endcards/Credits'), ('selfpromo', 'Unpaid/Self Promotion'), ('preview', 'Preview/Recap'), ('filler', 'Filler Tangent'), ('interaction', 'Interaction Reminder'), ('music_offtopic', 'Non-Music Section'))),
|
||||
),
|
||||
]
|
||||
|
52
tubesync/sync/migrations/0031_metadata_metadataformat.py
Normal file
@ -0,0 +1,52 @@
|
||||
# Generated by Django 5.1.8 on 2025-04-11 07:36
|
||||
|
||||
import common.json
|
||||
import django.db.models.deletion
|
||||
import uuid
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('sync', '0001_squashed_0030_alter_source_source_vcodec'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='Metadata',
|
||||
fields=[
|
||||
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, help_text='UUID of the metadata', primary_key=True, serialize=False, verbose_name='uuid')),
|
||||
('site', models.CharField(blank=True, default='Youtube', help_text='Site from which the metadata was retrieved', max_length=256, verbose_name='site')),
|
||||
('key', models.CharField(blank=True, default='', help_text='Media identifier at the site from which the metadata was retrieved', max_length=256, verbose_name='key')),
|
||||
('created', models.DateTimeField(auto_now_add=True, db_index=True, help_text='Date and time the metadata was created', verbose_name='created')),
|
||||
('retrieved', models.DateTimeField(auto_now_add=True, db_index=True, help_text='Date and time the metadata was retrieved', verbose_name='retrieved')),
|
||||
('uploaded', models.DateTimeField(help_text='Date and time the media was uploaded', null=True, verbose_name='uploaded')),
|
||||
('published', models.DateTimeField(help_text='Date and time the media was published', null=True, verbose_name='published')),
|
||||
('value', models.JSONField(default=dict, encoder=common.json.JSONEncoder, help_text='JSON metadata object', verbose_name='value')),
|
||||
('media', models.ForeignKey(help_text='Media the metadata belongs to', on_delete=django.db.models.deletion.CASCADE, related_name='metadata_media', to='sync.media')),
|
||||
],
|
||||
options={
|
||||
'verbose_name': 'Metadata about a Media item',
|
||||
'verbose_name_plural': 'Metadata about a Media item',
|
||||
'unique_together': {('media', 'site', 'key')},
|
||||
},
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='MetadataFormat',
|
||||
fields=[
|
||||
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, help_text='UUID of the format', primary_key=True, serialize=False, verbose_name='uuid')),
|
||||
('site', models.CharField(blank=True, default='Youtube', help_text='Site from which the format is available', max_length=256, verbose_name='site')),
|
||||
('key', models.CharField(blank=True, default='', help_text='Media identifier at the site for which this format is available', max_length=256, verbose_name='key')),
|
||||
('number', models.PositiveIntegerField(help_text='Ordering number for this format', verbose_name='number')),
|
||||
('code', models.CharField(blank=True, default='', help_text='Format identification code', max_length=64, verbose_name='code')),
|
||||
('value', models.JSONField(default=dict, encoder=common.json.JSONEncoder, help_text='JSON metadata format object', verbose_name='value')),
|
||||
('metadata', models.ForeignKey(help_text='Metadata the format belongs to', on_delete=django.db.models.deletion.CASCADE, related_name='metadataformat_metadata', to='sync.metadata')),
|
||||
],
|
||||
options={
|
||||
'verbose_name': 'Format from the Metadata about a Media item',
|
||||
'verbose_name_plural': 'Formats from the Metadata about a Media item',
|
||||
'unique_together': {('metadata', 'site', 'key', 'code'), ('metadata', 'site', 'key', 'number')},
|
||||
},
|
||||
),
|
||||
]
|
@ -0,0 +1,68 @@
|
||||
# Generated by Django 5.1.8 on 2025-04-23 18:10
|
||||
|
||||
import common.json
|
||||
import django.db.models.deletion
|
||||
import uuid
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
replaces = [('sync', '0031_metadata_metadataformat'), ('sync', '0032_alter_metadata_options_alter_metadataformat_options_and_more')]
|
||||
|
||||
dependencies = [
|
||||
('sync', '0001_squashed_0030_alter_source_source_vcodec'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='Metadata',
|
||||
fields=[
|
||||
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, help_text='UUID of the metadata', primary_key=True, serialize=False, verbose_name='uuid')),
|
||||
('site', models.CharField(blank=True, db_index=True, default='Youtube', help_text='Site from which the metadata was retrieved', max_length=256, verbose_name='site')),
|
||||
('key', models.CharField(blank=True, db_index=True, default='', help_text='Media identifier at the site from which the metadata was retrieved', max_length=256, verbose_name='key')),
|
||||
('created', models.DateTimeField(auto_now_add=True, db_index=True, help_text='Date and time the metadata was created', verbose_name='created')),
|
||||
('retrieved', models.DateTimeField(auto_now_add=True, db_index=True, help_text='Date and time the metadata was retrieved', verbose_name='retrieved')),
|
||||
('uploaded', models.DateTimeField(db_index=True, help_text='Date and time the media was uploaded', null=True, verbose_name='uploaded')),
|
||||
('published', models.DateTimeField(db_index=True, help_text='Date and time the media was published', null=True, verbose_name='published')),
|
||||
('value', models.JSONField(default=dict, encoder=common.json.JSONEncoder, help_text='JSON metadata object', verbose_name='value')),
|
||||
('media', models.OneToOneField(help_text='Media the metadata belongs to', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='new_metadata', to='sync.media')),
|
||||
],
|
||||
options={
|
||||
'verbose_name': 'Metadata about Media',
|
||||
'verbose_name_plural': 'Metadata about Media',
|
||||
'unique_together': {('media', 'site', 'key')},
|
||||
'get_latest_by': ['-retrieved', '-created'],
|
||||
},
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='MetadataFormat',
|
||||
fields=[
|
||||
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, help_text='UUID of the format', primary_key=True, serialize=False, verbose_name='uuid')),
|
||||
('site', models.CharField(blank=True, db_index=True, default='Youtube', help_text='Site from which the format is available', max_length=256, verbose_name='site')),
|
||||
('key', models.CharField(blank=True, db_index=True, default='', help_text='Media identifier at the site from which this format is available', max_length=256, verbose_name='key')),
|
||||
('number', models.PositiveIntegerField(help_text='Ordering number for this format', verbose_name='number')),
|
||||
('code', models.CharField(blank=True, default='', help_text='Format identification code', max_length=64, verbose_name='code')),
|
||||
('value', models.JSONField(default=dict, encoder=common.json.JSONEncoder, help_text='JSON metadata format object', verbose_name='value')),
|
||||
('metadata', models.ForeignKey(help_text='Metadata the format belongs to', on_delete=django.db.models.deletion.CASCADE, related_name='format', to='sync.metadata')),
|
||||
],
|
||||
options={
|
||||
'verbose_name': 'Format from Media Metadata',
|
||||
'verbose_name_plural': 'Formats from Media Metadata',
|
||||
'unique_together': {('metadata', 'site', 'key', 'number')},
|
||||
'ordering': ['site', 'key', 'number'],
|
||||
},
|
||||
),
|
||||
migrations.AlterModelTable(
|
||||
name='metadata',
|
||||
table='sync_media_metadata',
|
||||
),
|
||||
migrations.AlterModelTable(
|
||||
name='metadataformat',
|
||||
table='sync_media_metadata_format',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='metadataformat',
|
||||
name='code',
|
||||
),
|
||||
]
|
@ -0,0 +1,78 @@
|
||||
# Generated by Django 5.1.8 on 2025-04-23 18:06
|
||||
|
||||
import django.db.models.deletion
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('sync', '0031_metadata_metadataformat'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterModelOptions(
|
||||
name='metadata',
|
||||
options={'get_latest_by': ['-retrieved', '-created'], 'verbose_name': 'Metadata about Media', 'verbose_name_plural': 'Metadata about Media'},
|
||||
),
|
||||
migrations.AlterModelOptions(
|
||||
name='metadataformat',
|
||||
options={'ordering': ['site', 'key', 'number'], 'verbose_name': 'Format from Media Metadata', 'verbose_name_plural': 'Formats from Media Metadata'},
|
||||
),
|
||||
migrations.AlterUniqueTogether(
|
||||
name='metadataformat',
|
||||
unique_together={('metadata', 'site', 'key', 'number')},
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='metadata',
|
||||
name='key',
|
||||
field=models.CharField(blank=True, db_index=True, default='', help_text='Media identifier at the site from which the metadata was retrieved', max_length=256, verbose_name='key'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='metadata',
|
||||
name='media',
|
||||
field=models.OneToOneField(help_text='Media the metadata belongs to', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='new_metadata', to='sync.media'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='metadata',
|
||||
name='published',
|
||||
field=models.DateTimeField(db_index=True, help_text='Date and time the media was published', null=True, verbose_name='published'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='metadata',
|
||||
name='site',
|
||||
field=models.CharField(blank=True, db_index=True, default='Youtube', help_text='Site from which the metadata was retrieved', max_length=256, verbose_name='site'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='metadata',
|
||||
name='uploaded',
|
||||
field=models.DateTimeField(db_index=True, help_text='Date and time the media was uploaded', null=True, verbose_name='uploaded'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='metadataformat',
|
||||
name='key',
|
||||
field=models.CharField(blank=True, db_index=True, default='', help_text='Media identifier at the site from which this format is available', max_length=256, verbose_name='key'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='metadataformat',
|
||||
name='metadata',
|
||||
field=models.ForeignKey(help_text='Metadata the format belongs to', on_delete=django.db.models.deletion.CASCADE, related_name='format', to='sync.metadata'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='metadataformat',
|
||||
name='site',
|
||||
field=models.CharField(blank=True, db_index=True, default='Youtube', help_text='Site from which the format is available', max_length=256, verbose_name='site'),
|
||||
),
|
||||
migrations.AlterModelTable(
|
||||
name='metadata',
|
||||
table='sync_media_metadata',
|
||||
),
|
||||
migrations.AlterModelTable(
|
||||
name='metadataformat',
|
||||
table='sync_media_metadata_format',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='metadataformat',
|
||||
name='code',
|
||||
),
|
||||
]
|
37
tubesync/sync/migrations/0032_metadata_transfer.py
Normal file
@ -0,0 +1,37 @@
|
||||
# Hand-crafted data migration
|
||||
|
||||
from django.db import migrations
|
||||
from common.utils import django_queryset_generator as qs_gen
|
||||
from sync.models import Media
|
||||
|
||||
|
||||
def use_tables(apps, schema_editor):
|
||||
#Media = apps.get_model('sync', 'Media')
|
||||
qs = Media.objects.filter(metadata__isnull=False)
|
||||
for media in qs_gen(qs):
|
||||
media.save_to_metadata('migrated', True)
|
||||
|
||||
def restore_metadata_column(apps, schema_editor):
|
||||
#Media = apps.get_model('sync', 'Media')
|
||||
qs = Media.objects.filter(metadata__isnull=False)
|
||||
for media in qs_gen(qs):
|
||||
metadata = media.loaded_metadata
|
||||
for key in {'migrated', '_using_table'}:
|
||||
metadata.pop(key, None)
|
||||
media.metadata = media.metadata_dumps(arg_dict=metadata)
|
||||
media.save()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('sync', '0031_squashed_metadata_metadataformat'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(
|
||||
code=use_tables,
|
||||
reverse_code=restore_metadata_column,
|
||||
),
|
||||
]
|
||||
|
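Note that this data migration deliberately imports the concrete Media model instead of apps.get_model('sync', 'Media') (left commented out above): the historical model would not carry the custom save_to_metadata, loaded_metadata and metadata_dumps helpers the migration depends on, at the cost of tying the migration to the current model code.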
@ -0,0 +1,29 @@
|
||||
# Generated by Django 5.1.9 on 2025-05-10 06:18
|
||||
|
||||
import common.json
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('sync', '0032_metadata_transfer'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='mediaserver',
|
||||
name='options',
|
||||
field=models.JSONField(encoder=common.json.JSONEncoder, help_text='Options for the media server', null=True, verbose_name='options'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='source',
|
||||
name='source_acodec',
|
||||
field=models.CharField(choices=[('OPUS', 'OPUS'), ('MP4A', 'MP4A')], db_index=True, default='OPUS', help_text='Source audio codec, desired audio encoding format to download', max_length=8, verbose_name='source audio codec'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='source',
|
||||
name='source_vcodec',
|
||||
field=models.CharField(choices=[('AV1', 'AV1'), ('VP9', 'VP9'), ('AVC1', 'AVC1 (H.264)')], db_index=True, default='VP9', help_text='Source video codec, desired video encoding format to download (ignored if "resolution" is audio only)', max_length=8, verbose_name='source video codec'),
|
||||
),
|
||||
]
|
19
tubesync/sync/models/__init__.py
Normal file
@ -0,0 +1,19 @@
|
||||
# These are referenced from the migration files
|
||||
|
||||
from ._migrations import (
|
||||
get_media_file_path,
|
||||
get_media_thumb_path,
|
||||
media_file_storage,
|
||||
)
|
||||
|
||||
# The actual model classes
|
||||
# The order starts with independent classes
|
||||
# then the classes that depend on them follow.
|
||||
|
||||
from .media_server import MediaServer
|
||||
|
||||
from .source import Source
|
||||
from .media import Media
|
||||
from .metadata import Metadata
|
||||
from .metadata_format import MetadataFormat
|
||||
|
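These re-exports are load-bearing: the squashed migration above references sync.models.get_media_file_path and sync.models.get_media_thumb_path by that exact dotted path, so moving the helpers into _migrations.py without re-exporting them here would break the recorded migrations.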
21
tubesync/sync/models/_migrations.py
Normal file
@ -0,0 +1,21 @@
from pathlib import Path
from django.conf import settings
from django.core.files.storage import FileSystemStorage


media_file_storage = FileSystemStorage(location=str(settings.DOWNLOAD_ROOT), base_url='/media-data/')


def get_media_file_path(instance, filename):
    return instance.filepath


def get_media_thumb_path(instance, filename):
    # we don't want to use alternate names for thumb files
    if instance.thumb:
        instance.thumb.delete(save=False)
    fileid = str(instance.uuid).lower()
    filename = f'{fileid}.jpg'
    prefix = fileid[:2]
    return Path('thumbs') / prefix / filename
12
tubesync/sync/models/_private.py
Normal file
@ -0,0 +1,12 @@
from ..choices import Val, YouTube_SourceType


_srctype_dict = lambda n: dict(zip( YouTube_SourceType.values, (n,) * len(YouTube_SourceType.values) ))


def _nfo_element(nfo, label, text, /, *, attrs={}, tail='\n', char=' ', indent=2):
    element = nfo.makeelement(label, attrs)
    element.text = text
    element.tail = tail + (char * indent)
    return element
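_srctype_dict builds a dict that maps every YouTube_SourceType value to the same constant, which is how Source declares its per-type tables (ICONS, INDEXERS, KEY_FIELD) in one line each. A quick illustration on stand-in values (the real enum values live in ..choices and are assumptions here):

# Stand-in values only; the actual YouTube_SourceType.values are assumptions.
values = ['c', 'i', 'p']
n = '<i class="fab fa-youtube"></i>'
assert dict(zip(values, (n,) * len(values))) == {'c': n, 'i': n, 'p': n}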
File diff suppressed because it is too large
118
tubesync/sync/models/media__tasks.py
Normal file
@ -0,0 +1,118 @@
import os
from pathlib import Path
from common.logger import log
from common.errors import (
    NoMetadataException,
)
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from ..choices import Val, SourceResolution


def download_checklist(self, skip_checks=False):
    media = self
    if skip_checks:
        return True

    if not media.source.download_media:
        log.warn(f'Download task triggered for media: {media} (UUID: {media.pk}) but '
                 f'the source {media.source} has since been marked to not download, '
                 f'not downloading')
        return False
    if media.skip or media.manual_skip:
        # Media was toggled to be skipped after the task was scheduled
        log.warn(f'Download task triggered for media: {media} (UUID: {media.pk}) but '
                 f'it is now marked to be skipped, not downloading')
        return False
    # metadata is required to generate the proper filepath
    if not media.has_metadata:
        raise NoMetadataException('Metadata is not yet available.')
    downloaded_file_exists = (
        media.downloaded and
        media.has_metadata and
        (
            media.media_file_exists or
            media.filepath.exists()
        )
    )
    if downloaded_file_exists:
        # Media has been marked as downloaded before the download_media task was fired,
        # skip it
        log.warn(f'Download task triggered for media: {media} (UUID: {media.pk}) but '
                 f'it has already been marked as downloaded, not downloading again')
        return False
    max_cap_age = media.source.download_cap_date
    published = media.published
    if max_cap_age and published:
        if published <= max_cap_age:
            log.warn(f'Download task triggered media: {media} (UUID: {media.pk}) but '
                     f'the source has a download cap and the media is now too old, '
                     f'not downloading')
            return False
    return True  # all checks passed


def download_finished(self, format_str, container, downloaded_filepath=None):
    media = self
    if downloaded_filepath is None:
        downloaded_filepath = self.filepath
    filepath = Path(downloaded_filepath)

    # Media has been downloaded successfully
    log.info(f'Successfully downloaded media: {media} (UUID: {media.pk}) to: '
             f'"{filepath}"')
    # Link the media file to the object and update info about the download
    self.media_file.name = str(filepath.relative_to(self.media_file.storage.location))
    media.downloaded = True
    media.download_date = timezone.now()
    media.downloaded_filesize = os.path.getsize(filepath)
    media.downloaded_container = container
    if '+' in format_str:
        # Separate audio and video streams
        vformat_code, aformat_code = format_str.split('+')
        aformat = media.get_format_by_code(aformat_code)
        vformat = media.get_format_by_code(vformat_code)
        media.downloaded_format = vformat['format']
        media.downloaded_height = vformat['height']
        media.downloaded_width = vformat['width']
        media.downloaded_audio_codec = aformat['acodec']
        media.downloaded_video_codec = vformat['vcodec']
        media.downloaded_container = container
        media.downloaded_fps = vformat['fps']
        media.downloaded_hdr = vformat['is_hdr']
    else:
        # Combined stream or audio-only stream
        cformat_code = format_str
        cformat = media.get_format_by_code(cformat_code)
        media.downloaded_audio_codec = cformat['acodec']
        if cformat['vcodec']:
            # Combined
            media.downloaded_format = cformat['format']
            media.downloaded_height = cformat['height']
            media.downloaded_width = cformat['width']
            media.downloaded_video_codec = cformat['vcodec']
            media.downloaded_fps = cformat['fps']
            media.downloaded_hdr = cformat['is_hdr']
        else:
            self.downloaded_format = Val(SourceResolution.AUDIO)


def wait_for_premiere(self):
    hours = lambda td: 1+int((24*td.days)+(td.seconds/(60*60)))

    in_hours = None
    if self.has_metadata or not self.published:
        return (False, in_hours,)

    now = timezone.now()
    if self.published < now:
        in_hours = 0
        self.manual_skip = False
        self.skip = False
    else:
        in_hours = hours(self.published - now)
        self.manual_skip = True
        self.title = _(f'Premieres in {in_hours} hours')

    return (True, in_hours,)
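download_finished() relies on the yt-dlp style convention that a '+' in the format string joins a video stream ID with an audio stream ID, while a bare ID means a combined or audio-only stream. A tiny self-contained sketch of that parsing rule (the stream IDs are illustrative):

# yt-dlp style format strings: 'VIDEO+AUDIO' or a single combined/audio ID.
def split_format(format_str):
    if '+' in format_str:
        vformat_code, aformat_code = format_str.split('+')
        return (vformat_code, aformat_code)
    return (format_str, None)

assert split_format('248+251') == ('248', '251')  # separate video + audio
assert split_format('22') == ('22', None)         # combined stream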
86
tubesync/sync/models/media_server.py
Normal file
@ -0,0 +1,86 @@
from common.json import JSONEncoder
from django import db
from django.utils.translation import gettext_lazy as _
from ..choices import Val, MediaServerType


class MediaServer(db.models.Model):
    '''
        A remote media server, such as a Plex server.
    '''

    ICONS = {
        Val(MediaServerType.JELLYFIN): '<i class="fas fa-server"></i>',
        Val(MediaServerType.PLEX): '<i class="fas fa-server"></i>',
    }
    HANDLERS = MediaServerType.handlers_dict()

    server_type = db.models.CharField(
        _('server type'),
        max_length=1,
        db_index=True,
        choices=MediaServerType.choices,
        default=MediaServerType.PLEX,
        help_text=_('Server type'),
    )
    host = db.models.CharField(
        _('host'),
        db_index=True,
        max_length=200,
        help_text=_('Hostname or IP address of the media server'),
    )
    port = db.models.PositiveIntegerField(
        _('port'),
        db_index=True,
        help_text=_('Port number of the media server'),
    )
    use_https = db.models.BooleanField(
        _('use https'),
        default=False,
        help_text=_('Connect to the media server over HTTPS'),
    )
    verify_https = db.models.BooleanField(
        _('verify https'),
        default=True,
        help_text=_('If connecting over HTTPS, verify the SSL certificate is valid'),
    )
    options = db.models.JSONField(
        _('options'),
        encoder=JSONEncoder,
        blank=False,
        null=True,
        help_text=_('Options for the media server'),
    )

    def __str__(self):
        return f'{self.get_server_type_display()} server at {self.url}'

    class Meta:
        verbose_name = _('Media Server')
        verbose_name_plural = _('Media Servers')
        unique_together = (
            ('host', 'port'),
        )

    @property
    def url(self):
        scheme = 'https' if self.use_https else 'http'
        return f'{scheme}://{self.host.strip()}:{self.port}'

    @property
    def icon(self):
        return self.ICONS.get(self.server_type)

    @property
    def handler(self):
        handler_class = self.HANDLERS.get(self.server_type)
        return handler_class(self)

    def validate(self):
        return self.handler.validate()

    def update(self):
        return self.handler.update()

    def get_help_html(self):
        return self.handler.HELP
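The handler property is a small dispatch table: HANDLERS maps each server_type value to a handler class, and the model instantiates the matching class with itself, so validate() and update() stay server-type agnostic. A minimal sketch of the pattern with a hypothetical handler class (the real ones come from MediaServerType.handlers_dict()):

# PlexHandler and the 'p' key are hypothetical stand-ins for illustration.
class PlexHandler:
    def __init__(self, server):
        self.server = server
    def validate(self):
        return True

HANDLERS = {'p': PlexHandler}

def handler_for(server_type, server):
    handler_class = HANDLERS.get(server_type)
    return handler_class(server)  # TypeError if the type is unknown (None returned)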
153
tubesync/sync/models/metadata.py
Normal file
@ -0,0 +1,153 @@
import uuid
from common.json import JSONEncoder
from common.timestamp import timestamp_to_datetime
from common.utils import django_queryset_generator as qs_gen
from django import db
from django.utils.translation import gettext_lazy as _
from .media import Media


class Metadata(db.models.Model):
    '''
        Metadata for an indexed `Media` item.
    '''
    class Meta:
        db_table = 'sync_media_metadata'
        verbose_name = _('Metadata about Media')
        verbose_name_plural = _('Metadata about Media')
        unique_together = (
            ('media', 'site', 'key'),
        )
        get_latest_by = ["-retrieved", "-created"]

    uuid = db.models.UUIDField(
        _('uuid'),
        primary_key=True,
        editable=False,
        default=uuid.uuid4,
        help_text=_('UUID of the metadata'),
    )
    media = db.models.OneToOneField(
        Media,
        # on_delete=models.DO_NOTHING,
        on_delete=db.models.SET_NULL,
        related_name='new_metadata',
        help_text=_('Media the metadata belongs to'),
        null=True,
        parent_link=False,
    )
    site = db.models.CharField(
        _('site'),
        max_length=256,
        blank=True,
        db_index=True,
        null=False,
        default='Youtube',
        help_text=_('Site from which the metadata was retrieved'),
    )
    key = db.models.CharField(
        _('key'),
        max_length=256,
        blank=True,
        db_index=True,
        null=False,
        default='',
        help_text=_('Media identifier at the site from which the metadata was retrieved'),
    )
    created = db.models.DateTimeField(
        _('created'),
        auto_now_add=True,
        db_index=True,
        help_text=_('Date and time the metadata was created'),
    )
    retrieved = db.models.DateTimeField(
        _('retrieved'),
        auto_now_add=True,
        db_index=True,
        help_text=_('Date and time the metadata was retrieved'),
    )
    uploaded = db.models.DateTimeField(
        _('uploaded'),
        db_index=True,
        null=True,
        help_text=_('Date and time the media was uploaded'),
    )
    published = db.models.DateTimeField(
        _('published'),
        db_index=True,
        null=True,
        help_text=_('Date and time the media was published'),
    )
    value = db.models.JSONField(
        _('value'),
        encoder=JSONEncoder,
        null=False,
        default=dict,
        help_text=_('JSON metadata object'),
    )


    def __str__(self):
        template = '"{}" from {} at: {}'
        return template.format(
            self.key,
            self.site,
            self.retrieved.isoformat(timespec='seconds'),
        )

    @db.transaction.atomic(durable=False)
    def ingest_formats(self, formats=list(), /):
        number = 0
        for number, format in enumerate(formats, start=1):
            mdf, created = self.format.get_or_create(site=self.site, key=self.key, number=number)
            mdf.value = format
            mdf.save()
        if number > 0:
            # delete any numbers we did not overwrite or create
            self.format.filter(site=self.site, key=self.key, number__gt=number).delete()

    @property
    def with_formats(self):
        formats = self.format.all().order_by('number')
        formats_list = [ f.value for f in qs_gen(formats) ]
        metadata = self.value.copy()
        metadata.update(dict(formats=formats_list))
        return metadata

    @db.transaction.atomic(durable=False)
    def ingest_metadata(self, data):
        assert isinstance(data, dict), type(data)

        try:
            self.retrieved = timestamp_to_datetime(
                self.media.get_metadata_first_value(
                    'epoch',
                    arg_dict=data,
                )
            ) or self.created
        except AssertionError:
            self.retrieved = self.created

        try:
            self.published = timestamp_to_datetime(
                self.media.get_metadata_first_value(
                    ('release_timestamp', 'timestamp',),
                    arg_dict=data,
                )
            ) or self.media.published
        except AssertionError:
            self.published = self.media.published

        self.value = data.copy() # try not to have side-effects for the caller
        formats_key = self.media.get_metadata_field('formats')
        formats = self.value.pop(formats_key, list())
        self.uploaded = min(
            self.published,
            self.retrieved,
            self.media.created,
        )
        self.save()
        self.ingest_formats(formats)

        return self.with_formats
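ingest_metadata() pops the (large) formats list out of the JSON blob and hands it to ingest_formats(), which persists each entry as its own MetadataFormat row; with_formats then reassembles the original shape on demand. The round trip, sketched on plain dicts without the database (values are illustrative):

# Split/reassemble round trip performed by ingest_metadata() and with_formats.
metadata = {
    'id': 'SoMeUnIqUiD',
    'title': 'Example',
    'formats': [{'format_id': '248'}, {'format_id': '251'}],
}
value = dict(metadata)
formats = value.pop('formats', list())   # each entry becomes a MetadataFormat row
rebuilt = value.copy()
rebuilt.update(dict(formats=formats))    # what with_formats returns
assert rebuilt == metadata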
75
tubesync/sync/models/metadata_format.py
Normal file
@ -0,0 +1,75 @@
import uuid
from common.json import JSONEncoder
from django import db
from django.utils.translation import gettext_lazy as _
from .metadata import Metadata

class MetadataFormat(db.models.Model):
    '''
        A format from the Metadata for an indexed `Media` item.
    '''
    class Meta:
        db_table = f'{Metadata._meta.db_table}_format'
        verbose_name = _('Format from Media Metadata')
        verbose_name_plural = _('Formats from Media Metadata')
        unique_together = (
            ('metadata', 'site', 'key', 'number'),
        )
        ordering = ['site', 'key', 'number']

    uuid = db.models.UUIDField(
        _('uuid'),
        primary_key=True,
        editable=False,
        default=uuid.uuid4,
        help_text=_('UUID of the format'),
    )
    metadata = db.models.ForeignKey(
        Metadata,
        # on_delete=models.DO_NOTHING,
        on_delete=db.models.CASCADE,
        related_name='format',
        help_text=_('Metadata the format belongs to'),
        null=False,
    )
    site = db.models.CharField(
        _('site'),
        max_length=256,
        blank=True,
        db_index=True,
        null=False,
        default='Youtube',
        help_text=_('Site from which the format is available'),
    )
    key = db.models.CharField(
        _('key'),
        max_length=256,
        blank=True,
        db_index=True,
        null=False,
        default='',
        help_text=_('Media identifier at the site from which this format is available'),
    )
    number = db.models.PositiveIntegerField(
        _('number'),
        blank=False,
        null=False,
        help_text=_('Ordering number for this format'),
    )
    value = db.models.JSONField(
        _('value'),
        encoder=JSONEncoder,
        null=False,
        default=dict,
        help_text=_('JSON metadata format object'),
    )


    def __str__(self):
        template = '#{:n} "{}" from {}: {}'
        return template.format(
            self.number,
            self.key,
            self.site,
            self.value.get('format') or self.value.get('format_id'),
        )
549
tubesync/sync/models/source.py
Normal file
@ -0,0 +1,549 @@
import os
import re
import uuid
from pathlib import Path
from django import db
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.validators import RegexValidator
from django.utils import timezone
from django.utils.text import slugify
from django.utils.translation import gettext_lazy as _
from ..choices import (Val,
    SponsorBlock_Category, YouTube_SourceType, IndexSchedule,
    CapChoices, Fallback, FileExtension, FilterSeconds,
    SourceResolution, SourceResolutionInteger,
    YouTube_VideoCodec, YouTube_AudioCodec,
)
from ..fields import CommaSepChoiceField
from ..youtube import (
    get_media_info as get_youtube_media_info,
    get_channel_image_info as get_youtube_channel_image_info,
)
from ._migrations import media_file_storage
from ._private import _srctype_dict


class Source(db.models.Model):
    '''
        A Source is a source of media. Currently, this is either a YouTube channel
        or a YouTube playlist.
    '''

    sponsorblock_categories = CommaSepChoiceField(
        _(''),
        max_length=128,
        possible_choices=SponsorBlock_Category.choices,
        all_choice='all',
        allow_all=True,
        all_label='(All Categories)',
        default='all',
        help_text=_('Select the SponsorBlock categories that you wish to be removed from downloaded videos.'),
    )
    embed_metadata = db.models.BooleanField(
        _('embed metadata'),
        default=False,
        help_text=_('Embed metadata from source into file'),
    )
    embed_thumbnail = db.models.BooleanField(
        _('embed thumbnail'),
        default=False,
        help_text=_('Embed thumbnail into the file'),
    )
    enable_sponsorblock = db.models.BooleanField(
        _('enable sponsorblock'),
        default=True,
        help_text=_('Use SponsorBlock?'),
    )

    # Fontawesome icons used for the source on the front end
    ICONS = _srctype_dict('<i class="fab fa-youtube"></i>')

    # Format to use to display a URL for the source
    URLS = dict(zip(
        YouTube_SourceType.values,
        (
            'https://www.youtube.com/c/{key}',
            'https://www.youtube.com/channel/{key}',
            'https://www.youtube.com/playlist?list={key}',
        ),
    ))

    # Format used to create indexable URLs
    INDEX_URLS = dict(zip(
        YouTube_SourceType.values,
        (
            'https://www.youtube.com/c/{key}/{type}',
            'https://www.youtube.com/channel/{key}/{type}',
            'https://www.youtube.com/playlist?list={key}',
        ),
    ))

    # Callback functions to get a list of media from the source
    INDEXERS = _srctype_dict(get_youtube_media_info)

    # Field names to find the media ID used as the key when storing media
    KEY_FIELD = _srctype_dict('id')

    uuid = db.models.UUIDField(
        _('uuid'),
        primary_key=True,
        editable=False,
        default=uuid.uuid4,
        help_text=_('UUID of the source'),
    )
    created = db.models.DateTimeField(
        _('created'),
        auto_now_add=True,
        db_index=True,
        help_text=_('Date and time the source was created'),
    )
    last_crawl = db.models.DateTimeField(
        _('last crawl'),
        db_index=True,
        null=True,
        blank=True,
        help_text=_('Date and time the source was last crawled'),
    )
    source_type = db.models.CharField(
        _('source type'),
        max_length=1,
        db_index=True,
        choices=YouTube_SourceType.choices,
        default=YouTube_SourceType.CHANNEL,
        help_text=_('Source type'),
    )
    key = db.models.CharField(
        _('key'),
        max_length=100,
        db_index=True,
        unique=True,
        help_text=_('Source key, such as exact YouTube channel name or playlist ID'),
    )
    name = db.models.CharField(
        _('name'),
        max_length=100,
        db_index=True,
        unique=True,
        help_text=_('Friendly name for the source, used locally in TubeSync only'),
    )
    directory = db.models.CharField(
        _('directory'),
        max_length=100,
        db_index=True,
        unique=True,
        help_text=_('Directory name to save the media into'),
    )
    media_format = db.models.CharField(
        _('media format'),
        max_length=200,
        default=settings.MEDIA_FORMATSTR_DEFAULT,
        help_text=_('File format to use for saving files, detailed options at bottom of page.'),
    )
    index_schedule = db.models.IntegerField(
        _('index schedule'),
        choices=IndexSchedule.choices,
        db_index=True,
        default=IndexSchedule.EVERY_24_HOURS,
        help_text=_('Schedule of how often to index the source for new media'),
    )
    download_media = db.models.BooleanField(
        _('download media'),
        default=True,
        help_text=_('Download media from this source, if not selected the source will only be indexed'),
    )
    index_videos = db.models.BooleanField(
        _('index videos'),
        default=True,
        help_text=_('Index video media from this source'),
    )
    index_streams = db.models.BooleanField(
        _('index streams'),
        default=False,
        help_text=_('Index live stream media from this source'),
    )
    download_cap = db.models.IntegerField(
        _('download cap'),
        choices=CapChoices.choices,
        default=CapChoices.CAP_NOCAP,
        help_text=_('Do not download media older than this capped date'),
    )
    delete_old_media = db.models.BooleanField(
        _('delete old media'),
        default=False,
        help_text=_('Delete old media after "days to keep" days?'),
    )
    days_to_keep = db.models.PositiveSmallIntegerField(
        _('days to keep'),
        default=14,
        help_text=_(
            'If "delete old media" is ticked, the number of days after which '
            'to automatically delete media'
        ),
    )
    filter_text = db.models.CharField(
        _('filter string'),
        max_length=200,
        default='',
        blank=True,
        help_text=_('Regex compatible filter string for video titles'),
    )
    filter_text_invert = db.models.BooleanField(
        _('invert filter text matching'),
        default=False,
        help_text=_('Invert filter string regex match, skip any matching titles when selected'),
    )
    filter_seconds = db.models.PositiveIntegerField(
        _('filter seconds'),
        blank=True,
        null=True,
        help_text=_('Filter Media based on Min/Max duration. Leave blank or 0 to disable filtering'),
    )
    filter_seconds_min = db.models.BooleanField(
        _('filter seconds min/max'),
        choices=FilterSeconds.choices,
        default=Val(FilterSeconds.MIN),
        help_text=_(
            'When Filter Seconds is > 0, do we skip on minimum (video shorter than limit) or maximum (video '
            'greater than maximum) video duration'
        ),
    )
    delete_removed_media = db.models.BooleanField(
        _('delete removed media'),
        default=False,
        help_text=_('Delete media that is no longer on this playlist'),
    )
    delete_files_on_disk = db.models.BooleanField(
        _('delete files on disk'),
        default=False,
        help_text=_('Delete files on disk when they are removed from TubeSync'),
    )
    source_resolution = db.models.CharField(
        _('source resolution'),
        max_length=8,
        db_index=True,
        choices=SourceResolution.choices,
        default=SourceResolution.VIDEO_1080P,
        help_text=_('Source resolution, desired video resolution to download'),
    )
    source_vcodec = db.models.CharField(
        _('source video codec'),
        max_length=8,
        db_index=True,
        choices=YouTube_VideoCodec.choices,
        default=YouTube_VideoCodec.VP9,
        help_text=_('Source video codec, desired video encoding format to download (ignored if "resolution" is audio only)'),
    )
    source_acodec = db.models.CharField(
        _('source audio codec'),
        max_length=8,
        db_index=True,
        choices=YouTube_AudioCodec.choices,
        default=YouTube_AudioCodec.OPUS,
        help_text=_('Source audio codec, desired audio encoding format to download'),
    )
    prefer_60fps = db.models.BooleanField(
        _('prefer 60fps'),
        default=True,
        help_text=_('Where possible, prefer 60fps media for this source'),
    )
    prefer_hdr = db.models.BooleanField(
        _('prefer hdr'),
        default=False,
        help_text=_('Where possible, prefer HDR media for this source'),
    )
    fallback = db.models.CharField(
        _('fallback'),
        max_length=1,
        db_index=True,
        choices=Fallback.choices,
        default=Fallback.NEXT_BEST_HD,
        help_text=_('What to do when media in your source resolution and codecs is not available'),
    )
    copy_channel_images = db.models.BooleanField(
        _('copy channel images'),
        default=False,
        help_text=_('Copy channel banner and avatar. These may be detected and used by some media servers'),
    )
    copy_thumbnails = db.models.BooleanField(
        _('copy thumbnails'),
        default=False,
        help_text=_('Copy thumbnails with the media, these may be detected and used by some media servers'),
    )
    write_nfo = db.models.BooleanField(
        _('write nfo'),
        default=False,
        help_text=_('Write an NFO file in XML with the media info, these may be detected and used by some media servers'),
    )
    write_json = db.models.BooleanField(
        _('write json'),
        default=False,
        help_text=_('Write a JSON file with the media info, these may be detected and used by some media servers'),
    )
    has_failed = db.models.BooleanField(
        _('has failed'),
        default=False,
        help_text=_('Source has failed to index media'),
    )

    write_subtitles = db.models.BooleanField(
        _('write subtitles'),
        default=False,
        help_text=_('Download video subtitles'),
    )

    auto_subtitles = db.models.BooleanField(
        _('accept auto-generated subs'),
        default=False,
        help_text=_('Accept auto-generated subtitles'),
    )
    sub_langs = db.models.CharField(
        _('subs langs'),
        max_length=30,
        default='en',
        help_text=_('List of subtitles langs to download, comma-separated. Example: en,fr or all,-fr,-live_chat'),
        validators=[
            RegexValidator(
                regex=r"^(\-?[\_\.a-zA-Z-]+(,|$))+",
                message=_('Subtitle langs must be a comma-separated list of langs. example: en,fr or all,-fr,-live_chat'),
            ),
        ],
    )

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _('Source')
        verbose_name_plural = _('Sources')

    @property
    def icon(self):
        return self.ICONS.get(self.source_type)

    @property
    def slugname(self):
        replaced = self.name.replace('_', '-').replace('&', 'and').replace('+', 'and')
        return slugify(replaced)[:80]

    def deactivate(self):
        self.download_media = False
        self.index_streams = False
        self.index_videos = False
        self.index_schedule = IndexSchedule.NEVER
        self.save(update_fields={
            'download_media',
            'index_streams',
            'index_videos',
            'index_schedule',
        })

    @property
    def is_active(self):
        active = (
            self.download_media or
            self.index_streams or
            self.index_videos
        )
        return self.index_schedule and active

    @property
    def is_audio(self):
        return self.source_resolution == SourceResolution.AUDIO.value

    @property
    def is_playlist(self):
        return self.source_type == YouTube_SourceType.PLAYLIST.value

    @property
    def is_video(self):
        return not self.is_audio

    @property
    def download_cap_date(self):
        delta = self.download_cap
        if delta > 0:
            return timezone.now() - timezone.timedelta(seconds=delta)
        else:
            return False

    @property
    def days_to_keep_date(self):
        delta = self.days_to_keep
        if delta > 0:
            return timezone.now() - timezone.timedelta(days=delta)
        else:
            return False

    @property
    def extension(self):
        '''
            The extension is also used by youtube-dl to set the output container. As
            it is possible to quite easily pick combinations of codecs and containers
            which are invalid (e.g. OPUS audio in an MP4 container) just set this for
            people. All video is set to mkv containers, audio-only is set to m4a or ogg
            depending on audio codec.
        '''
        if self.is_audio:
            if self.source_acodec == Val(YouTube_AudioCodec.MP4A):
                return Val(FileExtension.M4A)
            elif self.source_acodec == Val(YouTube_AudioCodec.OPUS):
                return Val(FileExtension.OGG)
            else:
                raise ValueError('Unable to choose audio extension, unknown acodec')
        else:
            return Val(FileExtension.MKV)

    @classmethod
    def create_url(cls, source_type, key):
        url = cls.URLS.get(source_type)
        return url.format(key=key)

    @classmethod
    def create_index_url(cls, source_type, key, type):
        url = cls.INDEX_URLS.get(source_type)
        return url.format(key=key, type=type)

    @property
    def url(self):
        return self.__class__.create_url(self.source_type, self.key)

    def get_index_url(self, type):
        return self.__class__.create_index_url(self.source_type, self.key, type)

    @property
    def format_summary(self):
        if self.is_audio:
            vc = 'none'
        else:
            vc = self.source_vcodec
        ac = self.source_acodec
        f = ' 60FPS' if self.is_video and self.prefer_60fps else ''
        h = ' HDR' if self.is_video and self.prefer_hdr else ''
        return f'{self.source_resolution} (video:{vc}, audio:{ac}){f}{h}'.strip()

    @property
    def directory_path(self):
        download_dir = Path(media_file_storage.location)
        return download_dir / self.type_directory_path

    @property
    def type_directory_path(self):
        if settings.SOURCE_DOWNLOAD_DIRECTORY_PREFIX:
            if self.is_audio:
                return Path(settings.DOWNLOAD_AUDIO_DIR) / self.directory
            else:
                return Path(settings.DOWNLOAD_VIDEO_DIR) / self.directory
        else:
            return Path(self.directory)

    def make_directory(self):
        return os.makedirs(self.directory_path, exist_ok=True)

    @property
    def get_image_url(self):
        if self.is_playlist:
            raise SuspiciousOperation('This source is a playlist so it doesn\'t have a thumbnail.')

        return get_youtube_channel_image_info(self.url)


    def directory_exists(self):
        return (os.path.isdir(self.directory_path) and
                os.access(self.directory_path, os.W_OK))

    @property
    def key_field(self):
        return self.KEY_FIELD.get(self.source_type, '')

    @property
    def source_resolution_height(self):
        return SourceResolutionInteger.get(self.source_resolution, 0)

    @property
    def can_fallback(self):
        return self.fallback != Val(Fallback.FAIL)

    @property
    def example_media_format_dict(self):
        '''
            Populates a dict with real-ish and some placeholder data for media name
            format strings. Used for example filenames and media_format validation.
        '''
        fmt = []
        if self.source_resolution:
            fmt.append(self.source_resolution)
        if self.source_vcodec:
            fmt.append(self.source_vcodec.lower())
        if self.source_acodec:
            fmt.append(self.source_acodec.lower())
        if self.prefer_60fps:
            fmt.append('60fps')
        if self.prefer_hdr:
            fmt.append('hdr')
        now = timezone.now()
        return {
            'yyyymmdd': now.strftime('%Y%m%d'),
            'yyyy_mm_dd': now.strftime('%Y-%m-%d'),
            'yyyy': now.strftime('%Y'),
            'mm': now.strftime('%m'),
            'dd': now.strftime('%d'),
            'source': self.slugname,
            'source_full': self.name,
            'uploader': 'Some Channel Name',
            'title': 'some-media-title-name',
            'title_full': 'Some Media Title Name',
            'key': 'SoMeUnIqUiD',
            'format': '-'.join(fmt),
            'playlist_title': 'Some Playlist Title',
            'video_order': '01',
            'ext': self.extension,
            'resolution': self.source_resolution if self.source_resolution else '',
            'height': '720' if self.source_resolution else '',
            'width': '1280' if self.source_resolution else '',
            'vcodec': self.source_vcodec.lower() if self.source_vcodec else '',
            'acodec': self.source_acodec.lower(),
            'fps': '24' if self.source_resolution else '',
            'hdr': 'hdr' if self.source_resolution else ''
        }

    def get_example_media_format(self):
        try:
            return self.media_format.format(**self.example_media_format_dict)
        except Exception:
            return ''

    def is_regex_match(self, media_item_title):
        if not self.filter_text:
            return True
        return bool(re.search(self.filter_text, media_item_title))

    def get_index(self, type):
        indexer = self.INDEXERS.get(self.source_type, None)
        if not callable(indexer):
            raise Exception(f'Source type "{self.source_type}" has no indexer')
        days = None
        if self.download_cap_date:
            days = timezone.timedelta(seconds=self.download_cap).days
        response = indexer(self.get_index_url(type=type), days=days)
        if not isinstance(response, dict):
            return []
        entries = response.get('entries', [])
        return entries

    def index_media(self):
        '''
            Index the media source returning a list of media metadata as dicts.
        '''
        entries = list()
        if self.index_videos:
            entries += self.get_index('videos')
        # Playlists do something different that I have yet to figure out
        if not self.is_playlist:
            if self.index_streams:
                entries += self.get_index('streams')

        if settings.MAX_ENTRIES_PROCESSING:
            entries = entries[:settings.MAX_ENTRIES_PROCESSING]
        return entries
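The extension property pins the output container to the selected codecs so that yt-dlp is never asked for an invalid combination: every video source becomes MKV, and audio-only sources become M4A or OGG depending on the audio codec. The decision table, as a standalone sketch (plain strings stand in for the Val()/FileExtension/YouTube_AudioCodec lookups used by the real property):

# Container selection mirrored from Source.extension; strings replace enums.
def choose_extension(is_audio, acodec):
    if not is_audio:
        return 'mkv'  # all video goes into an MKV container
    if 'MP4A' == acodec:
        return 'm4a'
    if 'OPUS' == acodec:
        return 'ogg'
    raise ValueError('Unable to choose audio extension, unknown acodec')

assert choose_extension(False, 'OPUS') == 'mkv'
assert choose_extension(True, 'MP4A') == 'm4a'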
@ -1,14 +1,16 @@
from functools import partial
from pathlib import Path
from shutil import rmtree
from tempfile import TemporaryDirectory
from django.conf import settings
from django.db import IntegrityError
from django.db.models.signals import pre_save, post_save, pre_delete, post_delete
from django.db.transaction import on_commit
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
from background_task.signals import task_failed
from background_task.models import Task
from common.logger import log
from .models import Source, Media, MediaServer
from .models import Source, Media, MediaServer, Metadata
from .tasks import (delete_task_by_source, delete_task_by_media, index_source_task,
                    download_media_thumbnail, download_media_metadata,
                    map_task_to_instance, check_source_directory_exists,
@ -134,6 +136,7 @@ def source_post_save(sender, instance, created, **kwargs):
def source_pre_delete(sender, instance, **kwargs):
    # Triggered before a source is deleted, delete all media objects to trigger
    # the Media models post_delete signal
    source = instance
    log.info(f'Deactivating source: {instance.name}')
    instance.deactivate()
    log.info(f'Deleting tasks for source: {instance.name}')
@ -141,20 +144,22 @@ def source_pre_delete(sender, instance, **kwargs):
    delete_task_by_source('sync.tasks.check_source_directory_exists', instance.pk)
    delete_task_by_source('sync.tasks.rename_all_media_for_source', instance.pk)
    delete_task_by_source('sync.tasks.save_all_media_for_source', instance.pk)
    # Schedule deletion of media
    delete_task_by_source('sync.tasks.delete_all_media_for_source', instance.pk)
    verbose_name = _('Deleting all media for source "{}"')
    delete_all_media_for_source(
        str(instance.pk),
        str(instance.name),
        verbose_name=verbose_name.format(instance.name),
    )
    # Try to do it all immediately
    # If this is killed, the scheduled task should do the work instead.
    delete_all_media_for_source.now(
        str(instance.pk),
        str(instance.name),
    )

    # Fetch the media source
    sqs = Source.objects.filter(filter_text=str(source.pk))
    if sqs.count():
        media_source = sqs[0]
        # Schedule deletion of media
        delete_task_by_source('sync.tasks.delete_all_media_for_source', media_source.pk)
        verbose_name = _('Deleting all media for source "{}"')
        on_commit(partial(
            delete_all_media_for_source,
            str(media_source.pk),
            str(media_source.name),
            str(media_source.directory_path),
            priority=1,
            verbose_name=verbose_name.format(media_source.name),
        ))


@receiver(post_delete, sender=Source)
@ -164,14 +169,8 @@ def source_post_delete(sender, instance, **kwargs):
    log.info(f'Deleting tasks for removed source: {source.name}')
    delete_task_by_source('sync.tasks.index_source_task', instance.pk)
    delete_task_by_source('sync.tasks.check_source_directory_exists', instance.pk)
    delete_task_by_source('sync.tasks.delete_all_media_for_source', instance.pk)
    delete_task_by_source('sync.tasks.rename_all_media_for_source', instance.pk)
    delete_task_by_source('sync.tasks.save_all_media_for_source', instance.pk)
    # Remove the directory, if the user requested that
    directory_path = Path(source.directory_path)
    if (directory_path / '.to_be_removed').is_file():
        log.info(f'Deleting directory for: {source.name}: {directory_path}')
        rmtree(directory_path, True)


@receiver(task_failed, sender=Task)
@ -207,15 +206,15 @@ def media_post_save(sender, instance, created, **kwargs):
        if not existing_media_download_task:
            # Recalculate the "can_download" flag, this may
            # need to change if the source specifications have been changed
            if instance.metadata:
            if media.has_metadata:
                if instance.get_format_str():
                    if not instance.can_download:
                        instance.can_download = True
                        can_download_changed = True
                else:
                    if instance.can_download:
                        instance.can_download = False
                        can_download_changed = True
            else:
                if instance.can_download:
                    instance.can_download = False
                    can_download_changed = True
        # Recalculate the "skip_changed" flag
        skip_changed = filter_media(instance)
    else:
@ -237,12 +236,12 @@
        )

    # If the media is missing metadata schedule it to be downloaded
    if not (instance.skip or instance.metadata or existing_media_metadata_task):
    if not (media.skip or media.has_metadata or existing_media_metadata_task):
        log.info(f'Scheduling task to download metadata for: {instance.url}')
        verbose_name = _('Downloading metadata for "{}"')
        verbose_name = _('Downloading metadata for: {}: "{}"')
        download_media_metadata(
            str(instance.pk),
            verbose_name=verbose_name.format(instance.pk),
            verbose_name=verbose_name.format(media.key, media.name),
        )
    # If the media is missing a thumbnail schedule it to be downloaded (unless we are skipping this media)
    if not instance.thumb_file_exists:
@ -250,16 +249,25 @@
        if not instance.thumb and not instance.skip:
            thumbnail_url = instance.thumbnail
            if thumbnail_url:
                log.info(f'Scheduling task to download thumbnail for: {instance.name} '
                         f'from: {thumbnail_url}')
                log.info(
                    'Scheduling task to download thumbnail'
                    f' for: {instance.name} from: {thumbnail_url}'
                )
                verbose_name = _('Downloading thumbnail for "{}"')
                download_media_thumbnail(
                    str(instance.pk),
                    thumbnail_url,
                    verbose_name=verbose_name.format(instance.name),
                )
    media_file_exists = False
    try:
        media_file_exists |= instance.media_file_exists
        media_file_exists |= instance.filepath.exists()
    except OSError as e:
        log.exception(e)
        pass
    # If the media has not yet been downloaded schedule it to be downloaded
    if not (instance.media_file_exists or instance.filepath.exists() or existing_media_download_task):
    if not (media_file_exists or existing_media_download_task):
        # The file was deleted after it was downloaded, skip this media.
        if instance.can_download and instance.downloaded:
            skip_changed = True != instance.skip
@ -289,17 +297,43 @@ def media_pre_delete(sender, instance, **kwargs):
    delete_task_by_media('sync.tasks.wait_for_media_premiere', (str(instance.pk),))
    thumbnail_url = instance.thumbnail
    if thumbnail_url:
        delete_task_by_media('sync.tasks.download_media_thumbnail',
                             (str(instance.pk), thumbnail_url))
        delete_task_by_media(
            'sync.tasks.download_media_thumbnail',
            (str(instance.pk), thumbnail_url,),
        )
    # Remove thumbnail file for deleted media
    if instance.thumb:
        instance.thumb.delete(save=False)
    # Save the metadata site & thumbnail URL to the metadata column
    existing_metadata = instance.loaded_metadata
    metadata_str = instance.metadata or '{}'
    arg_dict = instance.metadata_loads(metadata_str)
    site_field = instance.get_metadata_field('extractor_key')
    thumbnail_field = instance.get_metadata_field('thumbnail')
    arg_dict.update({
        site_field: instance.get_metadata_first_value(
            'extractor_key',
            'Youtube',
            arg_dict=existing_metadata,
        ),
        thumbnail_field: thumbnail_url,
    })
    instance.metadata = instance.metadata_dumps(arg_dict=arg_dict)
    # Do not create more tasks before deleting
    instance.manual_skip = True
    instance.save()


@receiver(post_delete, sender=Media)
def media_post_delete(sender, instance, **kwargs):
    # Remove the video file, when configured to do so
    if instance.source.delete_files_on_disk and instance.media_file:
    remove_files = (
        instance.source and
        instance.source.delete_files_on_disk and
        instance.downloaded and
        instance.media_file
    )
    if remove_files:
        video_path = Path(str(instance.media_file.path)).resolve(strict=False)
        instance.media_file.delete(save=False)
        # the other files we created have these known suffixes
@ -354,3 +388,58 @@
        log.info(f'Deleting file for: {instance} path: {file}')
        delete_file(file)

    # Create a media entry for the indexing task to find
    # Requirements:
    #     source, key, duration, title, published
    created = False
    create_for_indexing_task = (
        not (
            #not instance.downloaded and
            instance.skip and
            instance.manual_skip
        )
    )
    if create_for_indexing_task:
        skipped_media, created = Media.objects.get_or_create(
            key=instance.key,
            source=instance.source,
        )
    if created:
        old_metadata = instance.loaded_metadata
        site_field = instance.get_metadata_field('extractor_key')
        thumbnail_url = instance.thumbnail
        thumbnail_field = instance.get_metadata_field('thumbnail')
        skipped_media.downloaded = False
        skipped_media.duration = instance.duration
        arg_dict = dict(
            _media_instance_was_deleted=True,
        )
        arg_dict.update({
            site_field: old_metadata.get(site_field),
            thumbnail_field: thumbnail_url,
        })
        skipped_media.metadata = skipped_media.metadata_dumps(
            arg_dict=arg_dict,
        )
        skipped_media.published = instance.published
        skipped_media.title = instance.title
        skipped_media.skip = True
        skipped_media.manual_skip = True
        skipped_media.save()
        # Re-use the old metadata if it exists
        instance_qs = Metadata.objects.filter(
            media__isnull=True,
            site=old_metadata.get(site_field) or 'Youtube',
            key=skipped_media.key,
        )
        try:
            instance_qs.update(media=skipped_media)
        except IntegrityError:
            # Delete the new metadata
            Metadata.objects.filter(media=skipped_media).delete()
            try:
                instance_qs.update(media=skipped_media)
            except IntegrityError:
                # Delete the old metadata if it still failed
                instance_qs.delete()
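The nested try/except IntegrityError above is a small retry: orphaned Metadata rows (left behind when the original Media row was deleted) are reattached to the placeholder record, and a one-to-one conflict is resolved by discarding the newer rows and trying once more. A sketch of that pattern in isolation, assuming the same Metadata model:

# Retry-on-conflict sketch for reattaching orphaned one-to-one rows.
from django.db import IntegrityError

def reattach_metadata(orphan_qs, owner):
    try:
        orphan_qs.update(media=owner)
    except IntegrityError:
        # something already points at the owner; drop it and retry once
        Metadata.objects.filter(media=owner).delete()
        try:
            orphan_qs.update(media=owner)
        except IntegrityError:
            orphan_qs.delete()  # give up and discard the orphaned rows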
@ -7,12 +7,17 @@
import os
import json
import math
import random
import requests
import time
import uuid
from io import BytesIO
from hashlib import sha1
from pathlib import Path
from datetime import datetime, timedelta
from shutil import copyfile
from shutil import copyfile, rmtree
from PIL import Image
from django import db
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
@ -24,14 +29,19 @@ from background_task import background
from background_task.exceptions import InvalidTaskError
from background_task.models import Task, CompletedTask
from common.logger import log
from common.errors import NoMediaException, NoMetadataException, DownloadFailedException
from common.utils import json_serial, remove_enclosed
from common.errors import ( NoFormatException, NoMediaException,
                            NoMetadataException, NoThumbnailException,
                            DownloadFailedException, )
from common.utils import ( django_queryset_generator as qs_gen,
                           remove_enclosed, )
from .choices import Val, TaskQueue
from .models import Source, Media, MediaServer
from .utils import (get_remote_image, resize_image_to_height, delete_file,
                    write_text_file, filter_response)
from .utils import ( get_remote_image, resize_image_to_height, delete_file,
                     write_text_file, filter_response, )
from .youtube import YouTubeError

db_vendor = db.connection.vendor


def get_hash(task_name, pk):
    '''
@ -54,7 +64,7 @@ def map_task_to_instance(task):
        'sync.tasks.download_media': Media,
        'sync.tasks.download_media_metadata': Media,
        'sync.tasks.save_all_media_for_source': Source,
        'sync.tasks.refesh_formats': Media,
        'sync.tasks.refresh_formats': Media,
        'sync.tasks.rename_media': Media,
        'sync.tasks.rename_all_media_for_source': Source,
        'sync.tasks.wait_for_media_premiere': Media,
@ -121,8 +131,7 @@ def update_task_status(task, status):
    else:
        task.verbose_name = f'[{status}] {task._verbose_name}'
    try:
        with atomic():
            task.save(update_fields={'verbose_name'})
        task.save(update_fields={'verbose_name'})
    except DatabaseError as e:
        if 'Save with update_fields did not affect any rows.' == str(e):
            pass
@ -200,25 +209,43 @@ def migrate_queues():
    return qs.update(queue=Val(TaskQueue.NET))


def save_model(instance):
    if 'sqlite' != db_vendor:
        with atomic(durable=False):
            instance.save()
        return

    # work around for SQLite and its many
    # "database is locked" errors
    with atomic(durable=False):
        instance.save()
    arg = getattr(settings, 'SQLITE_DELAY_FLOAT', 1.5)
    time.sleep(random.expovariate(arg))

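save_model() spreads out SQLite writes by sleeping for an exponentially distributed random interval after each save, so concurrent workers are unlikely to retry in lockstep. random.expovariate(lambd) has mean 1/lambd, so the default SQLITE_DELAY_FLOAT of 1.5 gives an average pause of roughly 0.67 seconds; a quick check:

# Illustration of the delay distribution used by save_model() above.
import random

random.seed(0)
samples = [random.expovariate(1.5) for _ in range(100_000)]
print(sum(samples) / len(samples))  # prints approximately 0.666
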
@atomic(durable=False)
|
||||
def schedule_media_servers_update():
|
||||
with atomic():
|
||||
# Schedule a task to update media servers
|
||||
log.info(f'Scheduling media server updates')
|
||||
verbose_name = _('Request media server rescan for "{}"')
|
||||
for mediaserver in MediaServer.objects.all():
|
||||
rescan_media_server(
|
||||
str(mediaserver.pk),
|
||||
priority=10,
|
||||
verbose_name=verbose_name.format(mediaserver),
|
||||
remove_existing_tasks=True,
|
||||
)
|
||||
# Schedule a task to update media servers
|
||||
log.info(f'Scheduling media server updates')
|
||||
verbose_name = _('Request media server rescan for "{}"')
|
||||
for mediaserver in MediaServer.objects.all():
|
||||
rescan_media_server(
|
||||
str(mediaserver.pk),
|
||||
verbose_name=verbose_name.format(mediaserver),
|
||||
)
|
||||
|
||||
|
||||
def cleanup_old_media():
|
||||
with atomic():
|
||||
for source in Source.objects.filter(delete_old_media=True, days_to_keep__gt=0):
|
||||
for source in qs_gen(Source.objects.filter(delete_old_media=True, days_to_keep__gt=0)):
|
||||
delta = timezone.now() - timedelta(days=source.days_to_keep)
|
||||
for media in source.media_source.filter(downloaded=True, download_date__lt=delta):
|
||||
mqs = source.media_source.defer(
|
||||
'metadata',
|
||||
).filter(
|
||||
downloaded=True,
|
||||
download_date__lt=delta,
|
||||
)
|
||||
for media in qs_gen(mqs):
|
||||
log.info(f'Deleting expired media: {source} / {media} '
|
||||
f'(now older than {source.days_to_keep} days / '
|
||||
f'download_date before {delta})')
|
||||
@ -232,8 +259,12 @@ def cleanup_removed_media(source, videos):
|
||||
if not source.delete_removed_media:
|
||||
return
|
||||
log.info(f'Cleaning up media no longer in source: {source}')
|
||||
media_objects = Media.objects.filter(source=source)
|
||||
for media in media_objects:
|
||||
mqs = Media.objects.defer(
|
||||
'metadata',
|
||||
).filter(
|
||||
source=source,
|
||||
)
|
||||
for media in qs_gen(mqs):
|
||||
matching_source_item = [video['id'] for video in videos if video['id'] == media.key]
|
||||
if not matching_source_item:
|
||||
log.info(f'{media.name} is no longer in source, removing')
|
||||
@ -242,11 +273,12 @@ def cleanup_removed_media(source, videos):
|
||||
schedule_media_servers_update()
|
||||
|
||||
|
||||
@background(schedule=dict(priority=10, run_at=30), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
|
||||
@background(schedule=dict(priority=20, run_at=30), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
|
||||
def index_source_task(source_id):
|
||||
'''
|
||||
Indexes media available from a Source object.
|
||||
'''
|
||||
db.reset_queries()
|
||||
cleanup_completed_tasks()
|
||||
# deleting expired media should happen any time an index task is requested
|
||||
cleanup_old_media()
|
||||
@ -261,7 +293,7 @@ def index_source_task(source_id):
|
||||
# Reset any errors
|
||||
# TODO: determine if this affects anything
|
||||
source.has_failed = False
|
||||
source.save()
|
||||
save_model(source)
|
||||
# Index the source
|
||||
videos = source.index_media()
|
||||
if not videos:
|
||||
@ -272,7 +304,7 @@ def index_source_task(source_id):
|
||||
f'is reachable')
|
||||
# Got some media, update the last crawl timestamp
|
||||
source.last_crawl = timezone.now()
|
||||
source.save()
|
||||
save_model(source)
|
||||
num_videos = len(videos)
|
||||
log.info(f'Found {num_videos} media items for source: {source}')
|
||||
fields = lambda f, m: m.get_metadata_field(f)
|
||||
@ -291,6 +323,7 @@ def index_source_task(source_id):
|
||||
# Video has no unique key (ID), it can't be indexed
|
||||
continue
|
||||
update_task_status(task, tvn_format.format(vn))
|
||||
# media, new_media = Media.objects.get_or_create(key=key, source=source)
|
||||
try:
|
||||
media = Media.objects.get(key=key, source=source)
|
||||
except Media.DoesNotExist:
|
||||
@ -299,9 +332,13 @@ def index_source_task(source_id):
|
||||
media.duration = float(video.get(fields('duration', media), None) or 0) or None
|
||||
media.title = str(video.get(fields('title', media), ''))[:200]
|
||||
timestamp = video.get(fields('timestamp', media), None)
|
||||
published_dt = media.metadata_published(timestamp)
|
||||
if published_dt is not None:
|
||||
media.published = published_dt
|
||||
try:
|
||||
published_dt = media.ts_to_dt(timestamp)
|
||||
except AssertionError:
|
||||
pass
|
||||
else:
|
||||
if published_dt:
|
||||
media.published = published_dt
|
||||
try:
|
||||
media.save()
|
||||
except IntegrityError as e:
|
||||
@ -310,23 +347,37 @@ def index_source_task(source_id):
|
||||
log.debug(f'Indexed media: {vn}: {source} / {media}')
|
||||
# log the new media instances
|
||||
new_media_instance = (
|
||||
# new_media or
|
||||
media.created and
|
||||
source.last_crawl and
|
||||
media.created >= source.last_crawl
|
||||
)
|
||||
if new_media_instance:
|
||||
log.info(f'Indexed new media: {source} / {media}')
|
||||
log.info(f'Scheduling tasks to download thumbnail for: {media.key}')
|
||||
thumbnail_fmt = 'https://i.ytimg.com/vi/{}/{}default.jpg'
|
||||
vn_fmt = _('Downloading {} thumbnail for: "{}": {}')
|
||||
for prefix in ('hq', 'sd', 'maxres',):
|
||||
thumbnail_url = thumbnail_fmt.format(
|
||||
media.key,
|
||||
prefix,
|
||||
)
|
||||
download_media_thumbnail(
|
||||
str(media.pk),
|
||||
thumbnail_url,
|
||||
verbose_name=vn_fmt.format(prefix, media.key, media.name),
|
||||
)
|
||||
log.info(f'Scheduling task to download metadata for: {media.url}')
verbose_name = _('Downloading metadata for "{}"')
verbose_name = _('Downloading metadata for: "{}": {}')
download_media_metadata(
str(media.pk),
priority=20,
verbose_name=verbose_name.format(media.pk),
verbose_name=verbose_name.format(media.key, media.name),
)
# Reset task.verbose_name to the saved value
update_task_status(task, None)
# Cleanup of media no longer available from the source
cleanup_removed_media(source, videos)
videos = video = None


@background(schedule=dict(priority=0, run_at=0), queue=Val(TaskQueue.FS))
@ -348,7 +399,7 @@ def check_source_directory_exists(source_id):
source.make_directory()


@background(schedule=dict(priority=5, run_at=10), queue=Val(TaskQueue.NET))
@background(schedule=dict(priority=10, run_at=10), queue=Val(TaskQueue.NET))
def download_source_images(source_id):
'''
Downloads an image and saves it as a local thumbnail attached to a
@ -379,6 +430,7 @@ def download_source_images(source_id):
file_path = source.directory_path / file_name
with open(file_path, 'wb') as f:
f.write(django_file.read())
i = image_file = None

if avatar != None:
url = avatar
@ -394,11 +446,12 @@ def download_source_images(source_id):
file_path = source.directory_path / file_name
with open(file_path, 'wb') as f:
f.write(django_file.read())
i = image_file = None

log.info(f'Thumbnail downloaded for source with ID: {source_id} / {source}')


@background(schedule=dict(priority=20, run_at=60), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
@background(schedule=dict(priority=40, run_at=60), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
def download_media_metadata(media_id):
'''
Downloads the metadata for a media item.
@ -459,14 +512,24 @@ def download_media_metadata(media_id):
response = metadata
if getattr(settings, 'SHRINK_NEW_MEDIA_METADATA', False):
response = filter_response(metadata, True)
media.metadata = json.dumps(response, separators=(',', ':'), default=json_serial)
media.ingest_metadata(response)
pointer_dict = {'_using_table': True}
media.metadata = media.metadata_dumps(arg_dict=pointer_dict)
upload_date = media.upload_date
# Media must have a valid upload date
if upload_date:
media.published = timezone.make_aware(upload_date)
published = media.metadata_published()
if published:
media.published = published
timestamp = media.get_metadata_first_value(
('release_timestamp', 'timestamp',),
arg_dict=response,
)
try:
published_dt = media.ts_to_dt(timestamp)
except AssertionError:
pass
else:
if published_dt:
media.published = published_dt

# Store title in DB so it's fast to access
if media.metadata_title:
@ -477,12 +540,12 @@ def download_media_metadata(media_id):
media.duration = media.metadata_duration

# Don't filter media here, the post_save signal will handle that
media.save()
log.info(f'Saved {len(media.metadata)} bytes of metadata for: '
save_model(media)
log.info(f'Saved {len(media.metadata_dumps())} bytes of metadata for: '
f'{source} / {media}: {media_id}')


@background(schedule=dict(priority=15, run_at=10), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
@background(schedule=dict(priority=10, run_at=10), queue=Val(TaskQueue.FS), remove_existing_tasks=True)
def download_media_thumbnail(media_id, url):
'''
Downloads an image from a URL and saves it as a local thumbnail attached to a
@ -500,10 +563,19 @@ def download_media_thumbnail(media_id, url):
return
width = getattr(settings, 'MEDIA_THUMBNAIL_WIDTH', 430)
height = getattr(settings, 'MEDIA_THUMBNAIL_HEIGHT', 240)
i = get_remote_image(url)
log.info(f'Resizing {i.width}x{i.height} thumbnail to '
f'{width}x{height}: {url}')
i = resize_image_to_height(i, width, height)
try:
try:
i = get_remote_image(url)
except requests.HTTPError as re:
if 404 != re.response.status_code:
raise
raise NoThumbnailException(re.response.reason) from re
except NoThumbnailException as e:
raise InvalidTaskError(str(e.__cause__)) from e
if (i.width > width) and (i.height > height):
log.info(f'Resizing {i.width}x{i.height} thumbnail to '
f'{width}x{height}: {url}')
i = resize_image_to_height(i, width, height)
image_file = BytesIO()
i.save(image_file, 'JPEG', quality=85, optimize=True, progressive=True)
image_file.seek(0)
@ -516,12 +588,23 @@ def download_media_thumbnail(media_id, url):
),
save=True
)
i = image_file = None
log.info(f'Saved thumbnail for: {media} from: {url}')
# After media is downloaded, copy the updated thumbnail.
copy_thumbnail = (
media.downloaded and
media.source.copy_thumbnails and
media.thumb_file_exists
)
if copy_thumbnail:
log.info(f'Copying media thumbnail from: {media.thumb.path} '
f'to: {media.thumbpath}')
copyfile(media.thumb.path, media.thumbpath)
return True


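The retry logic above turns a 404 into NoThumbnailException and then into InvalidTaskError; the "raise ... from ..." chaining keeps the original HTTPError reachable as __cause__, which is what the str(e.__cause__) call reads back. A minimal sketch of the same pattern, with a hypothetical fetch() helper that is not part of this commit:

# Sketch only: the exception-chaining pattern used by download_media_thumbnail().
import requests

class NoThumbnailException(Exception):
    pass

def fetch(url):
    try:
        r = requests.get(url, timeout=60)
        r.raise_for_status()
    except requests.HTTPError as re:
        if 404 != re.response.status_code:
            raise
        # a 404 becomes a domain-specific error, keeping the cause attached
        raise NoThumbnailException(re.response.reason) from re
    return r

try:
    fetch('https://example.com/missing.jpg')  # hypothetical URL
except NoThumbnailException as e:
    print(e.__cause__)  # the original requests.HTTPError
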
@background(schedule=dict(priority=15, run_at=60), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
def download_media(media_id):
@background(schedule=dict(priority=30, run_at=60), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
def download_media(media_id, override=False):
'''
Downloads the media to disk and attaches it to the Media instance.
'''
@ -530,83 +613,44 @@ def download_media(media_id):
except Media.DoesNotExist as e:
# Task triggered but the media no longer exists, do nothing
raise InvalidTaskError(_('no such media')) from e
if not media.source.download_media:
log.warn(f'Download task triggered for media: {media} (UUID: {media.pk}) but '
f'the source {media.source} has since been marked to not download, '
f'not downloading')
return
if media.skip or media.manual_skip:
# Media was toggled to be skipped after the task was scheduled
log.warn(f'Download task triggered for media: {media} (UUID: {media.pk}) but '
f'it is now marked to be skipped, not downloading')
return
# metadata is required to generate the proper filepath
if not media.has_metadata:
raise NoMetadataException('Metadata is not yet available.')
downloaded_file_exists = (
media.downloaded and
media.has_metadata and
(
media.media_file_exists or
media.filepath.exists()
)
)
if downloaded_file_exists:
# Media has been marked as downloaded before the download_media task was fired,
# skip it
log.warn(f'Download task triggered for media: {media} (UUID: {media.pk}) but '
f'it has already been marked as downloaded, not downloading again')
return
max_cap_age = media.source.download_cap_date
published = media.published
if max_cap_age and published:
if published <= max_cap_age:
log.warn(f'Download task triggered for media: {media} (UUID: {media.pk}) but '
f'the source has a download cap and the media is now too old, '
f'not downloading')
else:
if not media.download_checklist(override):
return

filepath = media.filepath
container = format_str = None
log.info(f'Downloading media: {media} (UUID: {media.pk}) to: "{filepath}"')
format_str, container = media.download_media()
if os.path.exists(filepath):
try:
format_str, container = media.download_media()
except NoFormatException as e:
# Try refreshing formats
if media.has_metadata:
log.debug(f'Scheduling a task to refresh metadata for: {media.key}: "{media.name}"')
refresh_formats(
str(media.pk),
verbose_name=f'Refreshing metadata formats for: {media.key}: "{media.name}"',
)
log.exception(str(e))
raise
else:
if not os.path.exists(filepath):
# Try refreshing formats
if media.has_metadata:
log.debug(f'Scheduling a task to refresh metadata for: {media.key}: "{media.name}"')
refresh_formats(
str(media.pk),
verbose_name=f'Refreshing metadata formats for: {media.key}: "{media.name}"',
)
# Expected file doesn't exist on disk
err = (f'Failed to download media: {media} (UUID: {media.pk}) to disk, '
f'expected outfile does not exist: {filepath}')
log.error(err)
# Raising an error here triggers the task to be re-attempted (or fail)
raise DownloadFailedException(err)

# Media has been downloaded successfully
log.info(f'Successfully downloaded media: {media} (UUID: {media.pk}) to: '
f'"{filepath}"')
# Link the media file to the object and update info about the download
media.media_file.name = str(media.source.type_directory_path / media.filename)
media.downloaded = True
media.download_date = timezone.now()
media.downloaded_filesize = os.path.getsize(filepath)
media.downloaded_container = container
if '+' in format_str:
# Separate audio and video streams
vformat_code, aformat_code = format_str.split('+')
aformat = media.get_format_by_code(aformat_code)
vformat = media.get_format_by_code(vformat_code)
media.downloaded_format = vformat['format']
media.downloaded_height = vformat['height']
media.downloaded_width = vformat['width']
media.downloaded_audio_codec = aformat['acodec']
media.downloaded_video_codec = vformat['vcodec']
media.downloaded_container = container
media.downloaded_fps = vformat['fps']
media.downloaded_hdr = vformat['is_hdr']
else:
# Combined stream or audio-only stream
cformat_code = format_str
cformat = media.get_format_by_code(cformat_code)
media.downloaded_audio_codec = cformat['acodec']
if cformat['vcodec']:
# Combined
media.downloaded_format = cformat['format']
media.downloaded_height = cformat['height']
media.downloaded_width = cformat['width']
media.downloaded_video_codec = cformat['vcodec']
media.downloaded_fps = cformat['fps']
media.downloaded_hdr = cformat['is_hdr']
else:
media.downloaded_format = 'audio'
media.save()
media.download_finished(format_str, container, filepath)
save_model(media)
# If selected, copy the thumbnail over as well
if media.source.copy_thumbnails:
if not media.thumb_file_exists:
@ -630,16 +674,6 @@ def download_media(media_id):
pass
# Schedule a task to update media servers
schedule_media_servers_update()
else:
# Expected file doesn't exist on disk
err = (f'Failed to download media: {media} (UUID: {media.pk}) to disk, '
f'expected outfile does not exist: {filepath}')
log.error(err)
# Try refreshing formats
if media.has_metadata:
media.refresh_formats
# Raising an error here triggers the task to be re-attempted (or fail)
raise DownloadFailedException(err)


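For context on the format bookkeeping above: yt-dlp style format strings either name one combined (or audio-only) stream, or join separate video and audio format codes with '+'. A minimal sketch of the branch, using illustrative codes that are assumptions rather than values from this commit:

# Sketch: how a format string selects the downloaded_* fields above.
def describe(format_str):
    if '+' in format_str:
        # separate video and audio streams, e.g. '137+140'
        vformat_code, aformat_code = format_str.split('+')
        return ('separate', vformat_code, aformat_code)
    # combined stream or audio-only stream, e.g. '22' or '140'
    return ('combined-or-audio', format_str, None)

assert describe('137+140') == ('separate', '137', '140')
assert describe('22') == ('combined-or-audio', '22', None)
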
@background(schedule=dict(priority=0, run_at=30), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
@ -657,7 +691,7 @@ def rescan_media_server(mediaserver_id):
mediaserver.update()


@background(schedule=dict(priority=25, run_at=600), queue=Val(TaskQueue.FS), remove_existing_tasks=True)
@background(schedule=dict(priority=30, run_at=600), queue=Val(TaskQueue.FS), remove_existing_tasks=True)
def save_all_media_for_source(source_id):
'''
Iterates all media items linked to a source and saves them to
@ -665,6 +699,7 @@ def save_all_media_for_source(source_id):
source has its parameters changed and all media needs to be
checked to see if its download status has changed.
'''
db.reset_queries()
try:
source = Source.objects.get(pk=source_id)
except Source.DoesNotExist as e:
@ -673,16 +708,27 @@ def save_all_media_for_source(source_id):
f'source exists with ID: {source_id}')
raise InvalidTaskError(_('no such source')) from e

saved_later = set()
mqs = Media.objects.filter(source=source)
task = get_source_check_task(source_id)
refresh_qs = mqs.filter(
refresh_qs = Media.objects.all().only(
'pk',
'uuid',
'key',
'title', # for name property
).filter(
source=source,
can_download=False,
skip=False,
manual_skip=False,
downloaded=False,
metadata__isnull=False,
)
save_qs = Media.objects.all().only(
'pk',
'uuid',
).filter(
source=source,
)
saved_later = set()
task = get_source_check_task(source_id)
if task:
task._verbose_name = remove_enclosed(
task.verbose_name, '[', ']', ' ',
@ -690,28 +736,35 @@ def save_all_media_for_source(source_id):
end=task.verbose_name.find('Check'),
)
tvn_format = '1/{:,}' + f'/{refresh_qs.count():,}'
for mn, media in enumerate(refresh_qs, start=1):
for mn, media in enumerate(qs_gen(refresh_qs), start=1):
update_task_status(task, tvn_format.format(mn))
refesh_formats(
refresh_formats(
str(media.pk),
verbose_name=f'Refreshing metadata formats for: {media.key}: "{media.name}"',
)
saved_later.add(media.uuid)

# Keep out of the way of the index task!
# SQLite will be locked for a while if we start
# a large source, which reschedules a more costly task.
if 'sqlite' == db_vendor:
index_task = get_source_index_task(source_id)
if index_task and index_task.locked_by_pid_running():
raise Exception(_('Indexing not completed'))

# Trigger the post_save signal for each media item linked to this source as various
# flags may need to be recalculated
tvn_format = '2/{:,}' + f'/{mqs.count():,}'
for mn, media in enumerate(mqs, start=1):
tvn_format = '2/{:,}' + f'/{save_qs.count():,}'
for mn, media in enumerate(qs_gen(save_qs), start=1):
if media.uuid not in saved_later:
update_task_status(task, tvn_format.format(mn))
with atomic():
media.save()
save_model(media)
# Reset task.verbose_name to the saved value
update_task_status(task, None)


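The tvn_format strings above embed a pass/position/total progress marker into the task's verbose name: pass 1 covers the format refreshes, pass 2 the saves. Assuming a queryset of 1,234 items, the generated values look like this:

# Sketch: the progress markers written by update_task_status() above.
count = 1234  # hypothetical refresh_qs.count() or save_qs.count()
tvn_format = '1/{:,}' + f'/{count:,}'
print(tvn_format.format(7))   # -> '1/7/1,234'
tvn_format = '2/{:,}' + f'/{count:,}'
print(tvn_format.format(89))  # -> '2/89/1,234'
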
@background(schedule=dict(priority=10, run_at=0), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
def refesh_formats(media_id):
@background(schedule=dict(priority=50, run_at=0), queue=Val(TaskQueue.NET), remove_existing_tasks=True)
def refresh_formats(media_id):
try:
media = Media.objects.get(pk=media_id)
except Media.DoesNotExist as e:
@ -722,17 +775,18 @@ def refesh_formats(media_id):
log.debug(f'Failed to refresh formats for: {media.source} / {media.key}: {e!s}')
pass
else:
with atomic():
media.save()
save_model(media)


@background(schedule=dict(priority=20, run_at=60), queue=Val(TaskQueue.FS), remove_existing_tasks=True)
def rename_media(media_id):
try:
media = Media.objects.defer('metadata', 'thumb').get(pk=media_id)
media = Media.objects.get(pk=media_id)
except Media.DoesNotExist as e:
raise InvalidTaskError(_('no such media')) from e
media.rename_files()
else:
with atomic():
media.rename_files()


@background(schedule=dict(priority=20, run_at=300), queue=Val(TaskQueue.FS), remove_existing_tasks=True)
@ -756,59 +810,71 @@ def rename_all_media_for_source(source_id):
)
if not create_rename_tasks:
return
mqs = Media.objects.all().defer(
'metadata',
'thumb',
).filter(
mqs = Media.objects.all().filter(
source=source,
downloaded=True,
)
for media in mqs:
for media in qs_gen(mqs):
with atomic():
media.rename_files()


@background(schedule=dict(priority=0, run_at=60), queue=Val(TaskQueue.DB), remove_existing_tasks=True)
def wait_for_media_premiere(media_id):
hours = lambda td: 1+int((24*td.days)+(td.seconds/(60*60)))

try:
media = Media.objects.get(pk=media_id)
except Media.DoesNotExist as e:
raise InvalidTaskError(_('no such media')) from e
if media.has_metadata:
return
now = timezone.now()
if media.published < now:
media.manual_skip = False
media.skip = False
# start the download tasks
media.save()
else:
media.manual_skip = True
media.title = _(f'Premieres in {hours(media.published - now)} hours')
media.save()
task = get_media_premiere_task(media_id)
if task:
update_task_status(task, f'available in {hours(media.published - now)} hours')
valid, hours = media.wait_for_premiere()
if not valid:
return

if hours:
task = get_media_premiere_task(media_id)
if task:
update_task_status(task, f'available in {hours} hours')
save_model(media)

@background(schedule=dict(priority=1, run_at=300), queue=Val(TaskQueue.FS), remove_existing_tasks=False)
def delete_all_media_for_source(source_id, source_name):

@background(schedule=dict(priority=1, run_at=90), queue=Val(TaskQueue.FS), remove_existing_tasks=False)
def delete_all_media_for_source(source_id, source_name, source_directory):
source = None
assert source_id
assert source_name
assert source_directory
try:
source = Source.objects.get(pk=source_id)
except Source.DoesNotExist as e:
# Task triggered but the source no longer exists, do nothing
log.error(f'Task delete_all_media_for_source(pk={source_id}) called but no '
log.warn(f'Task delete_all_media_for_source(pk={source_id}) called but no '
f'source exists with ID: {source_id}')
raise InvalidTaskError(_('no such source')) from e
#raise InvalidTaskError(_('no such source')) from e
pass # this task can run after a source was deleted
mqs = Media.objects.all().defer(
'metadata',
).filter(
source=source or source_id,
)
for media in mqs:
log.info(f'Deleting media for source: {source_name} item: {media.name}')
with atomic():
media.delete()
with atomic(durable=True):
for media in qs_gen(mqs):
log.info(f'Deleting media for source: {source_name} item: {media.name}')
with atomic():
#media.downloaded = False
media.skip = True
media.manual_skip = True
media.save()
media.delete()
# Remove the directory, if the user requested that
directory_path = Path(source_directory)
remove = (
(source and source.delete_removed_media) or
(directory_path / '.to_be_removed').is_file()
)
if source:
with atomic(durable=True):
source.delete()
if remove:
log.info(f'Deleting directory for: {source_name}: {directory_path}')
rmtree(directory_path, True)
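The directory removal above completes a small handshake: when the user asks for the media files to be deleted, DeleteSourceView (later in this commit) touches a '.to_be_removed' marker inside the directory, and this task removes the directory when either that marker exists or the surviving source had delete_removed_media set. A minimal sketch of both sides, with a hypothetical path:

# Sketch: the marker-file handshake between the view and this task.
from pathlib import Path
from shutil import rmtree

directory_path = Path('downloads/some-source')  # hypothetical directory

# View side: record the user's choice on disk.
directory_path.mkdir(parents=True, exist_ok=True)
(directory_path / '.to_be_removed').touch(exist_ok=True)

# Task side: honour the marker even if the Source row is already gone.
source = None  # the source may have been deleted already
remove = (
    (source and source.delete_removed_media) or
    (directory_path / '.to_be_removed').is_file()
)
if remove:
    rmtree(directory_path, True)
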
@ -39,6 +39,9 @@
<div class="card-image">
<img src="{% if media.thumb %}{% url 'sync:media-thumb' pk=media.pk %}{% else %}{% static 'images/nothumb.png' %}{% endif %}">
</div>
<div>
<a href="{% url 'sync:redownload-thumb' pk=media.pk %}"><i class="fas fa-arrow-rotate-right"></i></a>
</div>
</div>
</div>
</div>
@ -28,7 +28,7 @@
<td class="hide-on-small-only">Verify HTTPS</td>
<td><span class="hide-on-med-and-up">Verify HTTPS<br></span><strong>{% if mediaserver.verify_https %}<i class="fas fa-check"></i>{% else %}<i class="fas fa-times"></i>{% endif %}</strong></td>
</tr>
{% for name, value in mediaserver.loaded_options.items %}
{% for name, value in mediaserver.options.items %}
<tr title="Unique key of the source, such as the channel name or playlist ID">
<td class="hide-on-small-only">{{ name|title }}</td>
<td><span class="hide-on-med-and-up">{{ name|title }}<br></span><strong>{% if name in private_options %}{{ value|truncatechars:6 }} (hidden){% else %}{{ value }}{% endif %}</strong></td>
34
tubesync/sync/templates/sync/task-schedule.html
Normal file
34
tubesync/sync/templates/sync/task-schedule.html
Normal file
@ -0,0 +1,34 @@
{% extends 'base.html' %}

{% block headtitle %}Schedule task{% endblock %}

{% block content %}
<div class="row no-margin-bottom">
<div class="col s12">
<h1>Schedule task</h1>
<p>
If you don't want to wait for the existing schedule to be triggered,
you can use this to change when the task will be scheduled to run.
It is not guaranteed to run at any exact time: after a task requests
to run, the wait for an execution slot in the appropriate queue, at
the assigned priority level, depends on how long other tasks take to
complete their work.
</p>
<p>
This changes the time the task requests to run to either the current
time or a chosen future time.
</p>
</div>
</div>
<div class="row">
<form method="post" action="{% url 'sync:run-task' pk=task.pk %}" class="col s12 simpleform">
{% csrf_token %}
{% include 'simpleform.html' with form=form %}
<div class="row no-margin-bottom padding-top">
<div class="col s12">
<button class="btn" type="submit" name="action">Schedule task <i class="far fa-clock"></i></button>
</div>
</div>
</form>
</div>
{% endblock %}
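The form above posts to the run-task URL; a future time can also be encoded directly in the schedule-task route added below, 'task/<int:pk>/schedule/<int:timestamp>'. Assuming timestamp_to_datetime() interprets that value as Unix epoch seconds, such a URL could be built like this:

# Sketch: computing the <int:timestamp> path component (assumed epoch seconds).
from datetime import datetime, timezone

when = datetime(2025, 6, 1, 12, 0, tzinfo=timezone.utc)  # hypothetical time
timestamp = int(when.timestamp())
print(f'/task/123/schedule/{timestamp}')  # 123 is a hypothetical task pk
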
@ -43,11 +43,16 @@
</p>
<div class="collection">
{% for task in errors %}
<a href="{% url task.url pk=task.instance.pk %}" class="collection-item error-text">
<i class="fas fa-exclamation-triangle"></i> <strong>{{ task }}</strong>, attempted {{ task.attempts }} time{{ task.attempts|pluralize }}<br>
Error: "{{ task.error_message }}"<br>
<div class="collection-item error-text">
<a href="{% url task.url pk=task.instance.pk %}" class="error-text">
<i class="fas fa-exclamation-triangle"></i> <strong>{{ task }}</strong>, attempted {{ task.attempts }} time{{ task.attempts|pluralize }}<br>
Error: "{{ task.error_message }}"<br>
</a>
<i class="fas fa-history"></i> Task will be retried at <strong>{{ task.run_at|date:'Y-m-d H:i:s' }}</strong>
</a>
<a href="{% url 'sync:run-task' pk=task.pk %}" class="error-text">
<i class="fas fa-undo"></i>
</a>
</div>
{% empty %}
<span class="collection-item no-items"><i class="fas fa-info-circle"></i> There are no tasks with errors on this page.</span>
{% endfor %}
@ -66,11 +71,17 @@
</p>
<div class="collection">
{% for task in scheduled %}
<a href="{% url task.url pk=task.instance.pk %}" class="collection-item">
<i class="fas fa-stopwatch"></i> <strong>{{ task }}</strong><br>
{% if task.instance.index_schedule and task.repeat > 0 %}Scheduled to run {{ task.instance.get_index_schedule_display|lower }}.<br>{% endif %}
<i class="fas fa-redo"></i> Task will run {% if task.run_now %}<strong>immediately</strong>{% else %}at <strong>{{ task.run_at|date:'Y-m-d H:i:s' }}</strong>{% endif %}
</a>
<div class="collection-item">
<a href="{% url task.url pk=task.instance.pk %}">
<i class="fas fa-hourglass-start"></i> <strong>{{ task }}</strong><br>
{% if task.instance.index_schedule and task.repeat > 0 %}Scheduled to run {{ task.instance.get_index_schedule_display|lower }}.<br>{% endif %}
<i class="far fa-clock"></i> Task will run {% if task.run_now %}<strong>immediately</strong>{% else %}at <strong>{{ task.run_at|date:'Y-m-d H:i:s' }}</strong>
</a>
<a href="{% url 'sync:run-task' pk=task.pk %}">
<i class="far fa-play-circle"></i>
{% endif %}
</a>
</div>
{% empty %}
<span class="collection-item no-items"><i class="fas fa-info-circle"></i> There are no scheduled tasks on this page.</span>
{% endfor %}
@ -1836,5 +1836,6 @@ class TasksTestCase(TestCase):
cleanup_old_media()

self.assertEqual(src1.media_source.all().count(), 3)
self.assertEqual(src2.media_source.all().count(), 2)
self.assertEqual(src2.media_source.all().count(), 3)
self.assertEqual(Media.objects.filter(pk=m22.pk).exists(), False)
self.assertEqual(Media.objects.filter(source=src2, key=m22.key, skip=True).exists(), True)
@ -3,7 +3,7 @@ from .views import (DashboardView, SourcesView, ValidateSourceView, AddSourceVie
SourceView, UpdateSourceView, DeleteSourceView, MediaView,
MediaThumbView, MediaItemView, MediaRedownloadView, MediaSkipView,
MediaEnableView, MediaContent, TasksView, CompletedTasksView, ResetTasks,
MediaServersView, AddMediaServerView, MediaServerView,
TaskScheduleView, MediaServersView, AddMediaServerView, MediaServerView,
DeleteMediaServerView, UpdateMediaServerView)


@ -14,104 +14,168 @@ urlpatterns = [

# Dashboard URLs

path('',
DashboardView.as_view(),
name='dashboard'),
path(
'',
DashboardView.as_view(),
name='dashboard',
),

# Source URLs

path('sources',
SourcesView.as_view(),
name='sources'),
path(
'sources',
SourcesView.as_view(),
name='sources',
),

path('source-validate/<slug:source_type>',
ValidateSourceView.as_view(),
name='validate-source'),
path(
'source-validate/<slug:source_type>',
ValidateSourceView.as_view(),
name='validate-source',
),

path('source-sync-now/<uuid:pk>',
SourcesView.as_view(),
name='source-sync-now'),
path(
'source-sync-now/<uuid:pk>',
SourcesView.as_view(),
name='source-sync-now',
),

path('source-add',
AddSourceView.as_view(),
name='add-source'),
path(
'source-add',
AddSourceView.as_view(),
name='add-source',
),

path('source/<uuid:pk>',
SourceView.as_view(),
name='source'),
path(
'source/<uuid:pk>',
SourceView.as_view(),
name='source',
),

path('source-update/<uuid:pk>',
UpdateSourceView.as_view(),
name='update-source'),
path(
'source-update/<uuid:pk>',
UpdateSourceView.as_view(),
name='update-source',
),

path('source-delete/<uuid:pk>',
DeleteSourceView.as_view(),
name='delete-source'),
path(
'source-delete/<uuid:pk>',
DeleteSourceView.as_view(),
name='delete-source',
),

# Media URLs

path('media',
MediaView.as_view(),
name='media'),
path(
'media',
MediaView.as_view(),
name='media',
),

path('media-thumb/<uuid:pk>',
MediaThumbView.as_view(),
name='media-thumb'),
path(
'media-thumb/<uuid:pk>',
MediaThumbView.as_view(),
name='media-thumb',
),

path('media/<uuid:pk>',
MediaItemView.as_view(),
name='media-item'),
path(
'media/<uuid:pk>',
MediaItemView.as_view(),
name='media-item',
),

path('media-redownload/<uuid:pk>',
MediaRedownloadView.as_view(),
name='redownload-media'),
path(
'media-redownload/<uuid:pk>',
MediaRedownloadView.as_view(),
name='redownload-media',
),

path('media-skip/<uuid:pk>',
MediaSkipView.as_view(),
name='skip-media'),
path(
'media-thumb-redownload/<uuid:pk>',
MediaItemView.as_view(),
name='redownload-thumb',
),

path('media-enable/<uuid:pk>',
MediaEnableView.as_view(),
name='enable-media'),
path(
'media-skip/<uuid:pk>',
MediaSkipView.as_view(),
name='skip-media',
),

path('media-content/<uuid:pk>',
MediaContent.as_view(),
name='media-content'),
path(
'media-enable/<uuid:pk>',
MediaEnableView.as_view(),
name='enable-media',
),

path(
'media-content/<uuid:pk>',
MediaContent.as_view(),
name='media-content',
),

# Task URLs

path('tasks',
TasksView.as_view(),
name='tasks'),
path(
'tasks',
TasksView.as_view(),
name='tasks',
),

path('tasks-completed',
CompletedTasksView.as_view(),
name='tasks-completed'),
path(
'task/<int:pk>/schedule/now',
TaskScheduleView.as_view(),
name='run-task',
),

path('tasks-reset',
ResetTasks.as_view(),
name='reset-tasks'),
path(
'task/<int:pk>/schedule/<int:timestamp>',
TaskScheduleView.as_view(),
name='schedule-task',
),

path(
'tasks-completed',
CompletedTasksView.as_view(),
name='tasks-completed',
),

path(
'tasks-reset',
ResetTasks.as_view(),
name='reset-tasks',
),

# Media Server URLs

path('mediaservers',
MediaServersView.as_view(),
name='mediaservers'),
path(
'mediaservers',
MediaServersView.as_view(),
name='mediaservers',
),

path('mediaserver-add/<slug:server_type>',
AddMediaServerView.as_view(),
name='add-mediaserver'),
path(
'mediaserver-add/<slug:server_type>',
AddMediaServerView.as_view(),
name='add-mediaserver',
),

path('mediaserver/<int:pk>',
MediaServerView.as_view(),
name='mediaserver'),
path(
'mediaserver/<int:pk>',
MediaServerView.as_view(),
name='mediaserver',
),

path('mediaserver-delete/<int:pk>',
DeleteMediaServerView.as_view(),
name='delete-mediaserver'),
path(
'mediaserver-delete/<int:pk>',
DeleteMediaServerView.as_view(),
name='delete-mediaserver',
),

path('mediaserver-update/<int:pk>',
UpdateMediaServerView.as_view(),
name='update-mediaserver'),
path(
'mediaserver-update/<int:pk>',
UpdateMediaServerView.as_view(),
name='update-mediaserver',
),

]
@ -65,6 +65,7 @@ def get_remote_image(url, force_rgb=True):
'(KHTML, like Gecko) Chrome/69.0.3497.64 Safari/537.36')
}
r = requests.get(url, headers=headers, stream=True, timeout=60)
r.raise_for_status()
r.raw.decode_content = True
i = Image.open(r.raw)
if force_rgb:
@ -1,6 +1,5 @@
import glob
import os
import json
from base64 import b64decode
import pathlib
import sys
@ -20,17 +19,20 @@ from django.utils.text import slugify
from django.utils._os import safe_join
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from common.timestamp import timestamp_to_datetime
from common.utils import append_uri_params
from background_task.models import Task, CompletedTask
from .models import Source, Media, MediaServer
from .forms import (ValidateSourceForm, ConfirmDeleteSourceForm, RedownloadMediaForm,
SkipMediaForm, EnableMediaForm, ResetTasksForm,
SkipMediaForm, EnableMediaForm, ResetTasksForm, ScheduleTaskForm,
ConfirmDeleteMediaServerForm)
from .utils import validate_url, delete_file, multi_key_sort
from .utils import validate_url, delete_file, multi_key_sort, mkdir_p
from .tasks import (map_task_to_instance, get_error_message,
get_source_completed_tasks, get_media_download_task,
delete_task_by_media, index_source_task, migrate_queues)
from .choices import (Val, MediaServerType, SourceResolution,
delete_task_by_media, index_source_task,
download_media_thumbnail,
check_source_directory_exists, migrate_queues)
from .choices import (Val, MediaServerType, SourceResolution, IndexSchedule,
YouTube_SourceType, youtube_long_source_types,
youtube_help, youtube_validation_urls)
from . import signals
@ -166,6 +168,7 @@ class ValidateSourceView(FormView):
template_name = 'sync/source-validate.html'
form_class = ValidateSourceForm
errors = {
'invalid_source': _('Invalid type for the source.'),
'invalid_url': _('Invalid URL, the URL for a "{item}" must be in '
'the format of "{example}". The error was: {error}.'),
}
@ -410,11 +413,39 @@ class DeleteSourceView(DeleteView, FormMixin):
context_object_name = 'source'

def post(self, request, *args, **kwargs):
source = self.get_object()
media_source = dict(
uuid=None,
index_schedule=IndexSchedule.NEVER,
download_media=False,
index_videos=False,
index_streams=False,
filter_text=str(source.pk),
)
copy_fields = set(map(lambda f: f.name, source._meta.fields)) - set(media_source.keys())
for k, v in source.__dict__.items():
if k in copy_fields:
media_source[k] = v
media_source = Source(**media_source)
delete_media_val = request.POST.get('delete_media', False)
delete_media = True if delete_media_val is not False else False
# overload this boolean for our own use
media_source.delete_removed_media = delete_media
# adjust the directory and key on the source to be deleted
source.directory = source.directory + '/deleted'
source.key = source.key + '/deleted'
source.name = f'[Deleting] {source.name}'
source.save(update_fields={'directory', 'key', 'name'})
source.refresh_from_db()
# save the new media source now that it is not a duplicate
media_source.uuid = None
media_source.save()
media_source.refresh_from_db()
# switch the media to the new source instance
Media.objects.filter(source=source).update(source=media_source)
if delete_media:
source = self.get_object()
directory_path = pathlib.Path(source.directory_path)
directory_path = pathlib.Path(media_source.directory_path)
mkdir_p(directory_path)
(directory_path / '.to_be_removed').touch(exist_ok=True)
return super().post(request, *args, **kwargs)

@ -493,6 +524,9 @@ class MediaThumbView(DetailView):

def get(self, request, *args, **kwargs):
media = self.get_object()
# Thumbnail media is never updated so we can ask the browser to cache it
# for ages, 604800 = 7 days
max_age = 604800
if media.thumb_file_exists:
thumb_path = pathlib.Path(media.thumb.path)
thumb = thumb_path.read_bytes()
@ -502,10 +536,10 @@ class MediaThumbView(DetailView):
thumb = b64decode('R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAA'
'AAAABAAEAAAICTAEAOw==')
content_type = 'image/gif'
max_age = 600
response = HttpResponse(thumb, content_type=content_type)
# Thumbnail media is never updated so we can ask the browser to cache it
# for ages, 604800 = 7 days
response['Cache-Control'] = 'public, max-age=604800'

response['Cache-Control'] = f'public, max-age={max_age}'
return response


@ -517,6 +551,7 @@ class MediaItemView(DetailView):
template_name = 'sync/media-item.html'
model = Media
messages = {
'thumbnail': _('Thumbnail has been scheduled to redownload'),
'redownloading': _('Media file has been deleted and scheduled to redownload'),
'skipped': _('Media file has been deleted and marked to never download'),
'enabled': _('Media has been re-enabled and will be downloaded'),
@ -552,6 +587,24 @@ class MediaItemView(DetailView):
data['media_file_path'] = pathlib.Path(self.object.media_file.path) if self.object.media_file else None
return data

def get(self, *args, **kwargs):
if args[0].path.startswith("/media-thumb-redownload/"):
media = Media.objects.get(pk=kwargs["pk"])
if media is None:
return HttpResponseNotFound()

verbose_name = _('Redownload thumbnail for "{}": {}')
download_media_thumbnail(
str(media.pk),
media.thumbnail,
verbose_name=verbose_name.format(media.key, media.name),
)
url = reverse_lazy('sync:media-item', kwargs={'pk': media.pk})
url = append_uri_params(url, {'message': 'thumbnail'})
return HttpResponseRedirect(url)
else:
return super().get(self, *args, **kwargs)


class MediaRedownloadView(FormView, SingleObjectMixin):
'''
@ -635,7 +688,7 @@ class MediaSkipView(FormView, SingleObjectMixin):
for file in all_related_files:
delete_file(file)
# Reset all download data
self.object.metadata = None
self.object.metadata_clear()
self.object.downloaded = False
self.object.downloaded_audio_codec = None
self.object.downloaded_video_codec = None
@ -952,6 +1005,91 @@ class ResetTasks(FormView):
return append_uri_params(url, {'message': 'reset'})


class TaskScheduleView(FormView, SingleObjectMixin):
'''
Confirm that the task should be re-scheduled.
'''

template_name = 'sync/task-schedule.html'
form_class = ScheduleTaskForm
model = Task
errors = dict(
invalid_when=_('The type ({}) was incorrect.'),
when_before_now=_('The date and time must be in the future.'),
)

def __init__(self, *args, **kwargs):
self.now = timezone.now()
self.object = None
self.timestamp = None
self.when = None
super().__init__(*args, **kwargs)

def dispatch(self, request, *args, **kwargs):
self.now = timezone.now()
self.object = self.get_object()
self.timestamp = kwargs.get('timestamp')
try:
self.when = timestamp_to_datetime(self.timestamp)
except AssertionError:
self.when = None
if self.when is None:
self.when = self.now
# Use the next minute and zero seconds
# The web browser does not select seconds by default
self.when = self.when.replace(second=0) + timezone.timedelta(minutes=1)
return super().dispatch(request, *args, **kwargs)

def get_initial(self):
initial = super().get_initial()
initial['now'] = self.now
initial['when'] = self.when
return initial

def get_context_data(self, *args, **kwargs):
data = super().get_context_data(*args, **kwargs)
data['now'] = self.now
data['when'] = self.when
return data

def get_success_url(self):
return append_uri_params(
reverse_lazy('sync:tasks'),
dict(
message='scheduled',
pk=str(self.object.pk),
),
)

def form_valid(self, form):
max_attempts = getattr(settings, 'MAX_ATTEMPTS', 15)
when = form.cleaned_data.get('when')

if not isinstance(when, self.now.__class__):
form.add_error(
'when',
ValidationError(
self.errors['invalid_when'].format(
type(when),
),
),
)
if when < self.now:
form.add_error(
'when',
ValidationError(self.errors['when_before_now']),
)

if form.errors:
return super().form_invalid(form)

self.object.attempts = max_attempts // 2
self.object.run_at = max(self.now, when)
self.object.save()

return super().form_valid(form)


class MediaServersView(ListView):
'''
List of media servers which have been added.
@ -1014,14 +1152,14 @@ class AddMediaServerView(FormView):
def form_valid(self, form):
# Assign mandatory fields, bundle other fields into options
mediaserver = MediaServer(server_type=self.server_type)
options = {}
options = dict()
model_fields = [field.name for field in MediaServer._meta.fields]
for field_name, field_value in form.cleaned_data.items():
if field_name in model_fields:
setattr(mediaserver, field_name, field_value)
else:
options[field_name] = field_value
mediaserver.options = json.dumps(options)
mediaserver.options = options
# Test the media server details are valid
try:
mediaserver.validate()
@ -1128,21 +1266,21 @@ class UpdateMediaServerView(FormView, SingleObjectMixin):
for field in self.object._meta.fields:
if field.name in self.form_class.declared_fields:
initial[field.name] = getattr(self.object, field.name)
for option_key, option_val in self.object.loaded_options.items():
for option_key, option_val in self.object.options.items():
if option_key in self.form_class.declared_fields:
initial[option_key] = option_val
return initial

def form_valid(self, form):
# Assign mandatory fields, bundle other fields into options
options = {}
options = dict()
model_fields = [field.name for field in MediaServer._meta.fields]
for field_name, field_value in form.cleaned_data.items():
if field_name in model_fields:
setattr(self.object, field_name, field_value)
else:
options[field_name] = field_value
self.object.options = json.dumps(options)
self.object.options = options
# Test the media server details are valid
try:
self.object.validate()
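A recurring theme in the view changes above: MediaServer.options is now assigned a plain dict instead of a json.dumps() string, and reads go through .options rather than .loaded_options, which suggests the model now (de)serialises the field itself. A minimal sketch of such a property, an assumption about the model change rather than code from this commit:

# Sketch only: a JSON-backed options attribute of the kind the views now assume.
import json

class MediaServerSketch:
    def __init__(self):
        self._options = '{}'  # serialised storage

    @property
    def options(self):
        return json.loads(self._options)

    @options.setter
    def options(self, value):
        self._options = json.dumps(value)

server = MediaServerSketch()
server.options = {'token': 'abc123'}  # plain dict in, as in form_valid()
print(server.options['token'])        # dict back out, as in the templates
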
@ -20,7 +20,7 @@ from .utils import mkdir_p
import yt_dlp
import yt_dlp.patch.check_thumbnails
import yt_dlp.patch.fatal_http_errors
from yt_dlp.utils import remove_end, OUTTMPL_TYPES
from yt_dlp.utils import remove_end, shell_quote, OUTTMPL_TYPES


_defaults = getattr(settings, 'YOUTUBE_DEFAULTS', {})
@ -198,6 +198,7 @@ def get_media_info(url, /, *, days=None, info_json=None):
'clean_infojson': False,
'daterange': yt_dlp.utils.DateRange(start=start),
'extractor_args': {
'youtube': {'formats': ['missing_pot']},
'youtubetab': {'approximate_date': ['true']},
},
'outtmpl': outtmpl,
@ -205,10 +206,14 @@ def get_media_info(url, /, *, days=None, info_json=None):
'paths': paths,
'postprocessors': postprocessors,
'skip_unavailable_fragments': False,
'sleep_interval_requests': 2 * settings.BACKGROUND_TASK_ASYNC_THREADS,
'sleep_interval_requests': 1,
'verbose': True if settings.DEBUG else False,
'writeinfojson': True,
})
if settings.BACKGROUND_TASK_RUN_ASYNC:
opts.update({
'sleep_interval_requests': 2 * settings.BACKGROUND_TASK_ASYNC_THREADS,
})
if start:
log.debug(f'get_media_info: used date range: {opts["daterange"]} for URL: {url}')
response = {}
@ -310,11 +315,36 @@ def download_media(
if extension in audio_exts:
pp_opts.extractaudio = True
pp_opts.nopostoverwrites = False
# The ExtractAudio post processor can change the extension.
# This post processor is to change the final filename back
# to what we are expecting it to be.
final_path = Path(output_file)
try:
final_path = final_path.resolve(strict=True)
except FileNotFoundError:
# This is very likely the common case
final_path = Path(output_file).resolve(strict=False)
expected_file = shell_quote(str(final_path))
cmds = pp_opts.exec_cmd.get('after_move', list())
# It is important that we use a tuple for strings.
# Otherwise, list adds each character instead.
# That last comma is really necessary!
cmds += (
f'test -f {expected_file} || '
'mv -T -u -- %(filepath,_filename|)q '
f'{expected_file}',
)
# assignment is the quickest way to cover both 'get' cases
pp_opts.exec_cmd['after_move'] = cmds
elif '+' not in media_format:
pp_opts.remuxvideo = extension

ytopts = {
'format': media_format,
'final_ext': extension,
'merge_output_format': extension,
'outtmpl': os.path.basename(output_file),
'remuxvideo': pp_opts.remuxvideo,
'quiet': False if settings.DEBUG else True,
'verbose': True if settings.DEBUG else False,
'noprogress': None if settings.DEBUG else True,
@ -329,6 +359,7 @@ def download_media(
'sleep_interval': 10,
'max_sleep_interval': min(20*60, max(60, settings.DOWNLOAD_MEDIA_DELAY)),
'sleep_interval_requests': 1 + (2 * settings.BACKGROUND_TASK_ASYNC_THREADS),
'extractor_args': opts.get('extractor_args', dict()),
'paths': opts.get('paths', dict()),
'postprocessor_args': opts.get('postprocessor_args', dict()),
'postprocessor_hooks': opts.get('postprocessor_hooks', list()),
@ -352,6 +383,18 @@ def download_media(
'temp': str(temp_dir_path),
})

# Allow download of formats that tested good with 'missing_pot'
youtube_ea_dict = ytopts['extractor_args'].get('youtube', dict())
formats_list = youtube_ea_dict.get('formats', list())
if 'missing_pot' not in formats_list:
formats_list += ('missing_pot',)
youtube_ea_dict.update({
'formats': formats_list,
})
ytopts['extractor_args'].update({
'youtube': youtube_ea_dict,
})

postprocessor_hook_func = postprocessor_hook.get('function', None)
if postprocessor_hook_func:
ytopts['postprocessor_hooks'].append(postprocessor_hook_func)
@ -377,6 +420,15 @@ def download_media(
'modifychapters+ffmpeg': codec_options,
})

# Provide the user control of 'overwrites' in the post processors.
pp_opts.overwrites = opts.get(
'overwrites',
ytopts.get(
'overwrites',
default_opts.overwrites,
),
)

# Create the post processors list.
# It already includes user-configured post processors as well.
ytopts['postprocessors'] = list(yt_dlp.get_postprocessors(pp_opts))
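The tuple comment above points at a classic Python pitfall: "+=" on a list iterates the right-hand side, so adding a bare string appends its characters one by one, while a one-element tuple appends the whole string. A quick demonstration:

# Demonstrates why the trailing comma in the cmds += (...) block matters.
cmds = []
cmds += 'mv'        # a string is iterable, so it is added char by char
print(cmds)         # -> ['m', 'v']

cmds = []
cmds += ('mv -T',)  # a one-element tuple appends the whole string
print(cmds)         # -> ['mv -T']
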
@ -59,9 +59,17 @@ else:
}
DATABASE_CONNECTION_STR = f'sqlite at "{DATABASES["default"]["NAME"]}"'

# the argument to random.expovariate(),
# a larger value means less delay
# with too little delay, you may see
# more "database is locked" errors
SQLITE_DELAY_FLOAT = 5


DEFAULT_THREADS = 1
BACKGROUND_TASK_ASYNC_THREADS = getenv('TUBESYNC_WORKERS', DEFAULT_THREADS, integer=True)
if BACKGROUND_TASK_ASYNC_THREADS > 1:
BACKGROUND_TASK_RUN_ASYNC = True


MEDIA_ROOT = CONFIG_BASE_DIR / 'media'
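As the comment above says, SQLITE_DELAY_FLOAT is the rate argument to random.expovariate(); the mean of that distribution is 1/rate, so a value of 5 gives an average delay of about 0.2 seconds, and a larger value means shorter delays and therefore more chance of "database is locked" errors. A quick check:

# Sketch: the delay distribution implied by SQLITE_DELAY_FLOAT.
import random

SQLITE_DELAY_FLOAT = 5
samples = [random.expovariate(SQLITE_DELAY_FLOAT) for _ in range(100_000)]
print(sum(samples) / len(samples))  # ~0.2, i.e. 1 / SQLITE_DELAY_FLOAT
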
@ -1,3 +1,4 @@
from django import VERSION as DJANGO_VERSION
from pathlib import Path
from common.utils import getenv

@ -99,7 +100,10 @@ AUTH_PASSWORD_VALIDATORS = [
LANGUAGE_CODE = 'en-us'
TIME_ZONE = getenv('TZ', 'UTC')
USE_I18N = True
USE_L10N = True
# Removed in Django 5.0, set to True by default in Django 4.0
# https://docs.djangoproject.com/en/4.1/releases/4.0/#localization
if DJANGO_VERSION[0:3] < (4, 0, 0):
USE_L10N = True
USE_TZ = True


@ -135,7 +139,7 @@ HEALTHCHECK_ALLOWED_IPS = ('127.0.0.1',)


MAX_ATTEMPTS = 15 # Number of times tasks will be retried
MAX_RUN_TIME = 1*(24*60*60) # Maximum amount of time in seconds a task can run
MAX_RUN_TIME = 12*(60*60) # Maximum amount of time in seconds a task can run
BACKGROUND_TASK_RUN_ASYNC = False # Run tasks async in the background
BACKGROUND_TASK_ASYNC_THREADS = 1 # Number of async tasks to run at once
MAX_BACKGROUND_TASK_ASYNC_THREADS = 8 # For sanity reasons
@ -173,6 +177,8 @@ YOUTUBE_DEFAULTS = {
'cachedir': False, # Disable on-disk caching
'addmetadata': True, # Embed metadata during postprocessing where available
'geo_verification_proxy': getenv('geo_verification_proxy').strip() or None,
'max_sleep_interval': (60)*5,
'sleep_interval': 0.25,
}
COOKIES_FILE = CONFIG_BASE_DIR / 'cookies.txt'

@ -210,10 +216,7 @@ except:
if MAX_RUN_TIME < 600:
MAX_RUN_TIME = 600

DOWNLOAD_MEDIA_DELAY = 60 + (MAX_RUN_TIME / 50)

if RENAME_SOURCES or RENAME_ALL_SOURCES:
BACKGROUND_TASK_ASYNC_THREADS += 1
DOWNLOAD_MEDIA_DELAY = 1 + round(MAX_RUN_TIME / 100)

if BACKGROUND_TASK_ASYNC_THREADS > MAX_BACKGROUND_TASK_ASYNC_THREADS:
BACKGROUND_TASK_ASYNC_THREADS = MAX_BACKGROUND_TASK_ASYNC_THREADS
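Worked through with the values above: MAX_RUN_TIME drops from 24 hours (86,400 seconds) to 12 hours (43,200 seconds), so the media download delay falls from 60 + (86400 / 50) = 1788 seconds to 1 + round(43200 / 100) = 433 seconds:

# Sketch: the combined effect of the MAX_RUN_TIME and delay formula changes.
OLD_MAX_RUN_TIME = 1 * (24 * 60 * 60)      # 86400 seconds
NEW_MAX_RUN_TIME = 12 * (60 * 60)          # 43200 seconds
print(60 + (OLD_MAX_RUN_TIME / 50))        # 1788.0 seconds before
print(1 + round(NEW_MAX_RUN_TIME / 100))   # 433 seconds after
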