Merge pull request #685 from tcely/patch-2

Wait for video premiere
This commit is contained in:
meeb 2025-02-04 13:33:02 +11:00 committed by GitHub
commit 4b08062761
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -28,6 +28,7 @@ from .models import Source, Media, MediaServer
from .utils import (get_remote_image, resize_image_to_height, delete_file,
write_text_file, filter_response)
from .filtering import filter_media
from .youtube import YouTubeError
def get_hash(task_name, pk):
@ -52,6 +53,7 @@ def map_task_to_instance(task):
'sync.tasks.download_media_metadata': Media,
'sync.tasks.save_all_media_for_source': Source,
'sync.tasks.rename_all_media_for_source': Source,
'sync.tasks.wait_for_media_premiere': Media,
}
MODEL_URL_MAP = {
Source: 'sync:source',
@ -126,6 +128,13 @@ def get_media_metadata_task(media_id):
except IndexError:
return False
def get_media_premiere_task(media_id):
    '''
        Looks up the scheduled premiere-wait task for a media item.
        Returns the first matching Task, or False when none is queued.
    '''
    matches = Task.objects.get_task('sync.tasks.wait_for_media_premiere',
                                    args=(str(media_id),))
    if matches:
        return matches[0]
    return False
def delete_task_by_source(task_name, source_id):
    '''
        Deletes every queued task with the given task name that belongs to
        the given source (tasks are queued under the source primary key).
    '''
    queued = Task.objects.filter(task_name=task_name, queue=str(source_id))
    return queued.delete()
@ -304,7 +313,51 @@ def download_media_metadata(media_id):
log.info(f'Task for ID: {media_id} / {media} skipped, due to task being manually skipped.')
return
source = media.source
metadata = media.index_metadata()
try:
metadata = media.index_metadata()
except YouTubeError as e:
e_str = str(e)
log_exception = True
if ': Premieres in ' in e_str:
now = timezone.now()
published_datetime = None
parts = e_str.split(': ', 1)[1].rsplit(' ', 2)
unit = lambda p: str(p[-1]).lower()
number = lambda p: int(str(p[-2]), base=10)
log.debug(parts)
try:
if 'days' == unit(parts):
published_datetime = now + timedelta(days=number(parts))
if 'hours' == unit(parts):
published_datetime = now + timedelta(hours=number(parts))
if 'minutes' == unit(parts):
published_datetime = now + timedelta(minutes=number(parts))
log.debug(unit(parts))
log.debug(number(parts))
except Exception as ee:
log.exception(ee)
pass
if published_datetime:
media.published = published_datetime
media.manual_skip = True
media.save()
verbose_name = _('Waiting for the premiere of "{}" at: {}')
wait_for_media_premiere(
str(media.pk),
priority=0,
queue=str(media.pk),
repeat=Task.HOURLY,
repeat_until = published_datetime + timedelta(hours=1),
verbose_name=verbose_name.format(media.key, published_datetime.isoformat(' ', 'seconds')),
remove_existing_tasks=True,
)
log_exception = False
if log_exception:
log.exception(e)
log.debug(str(e))
return
response = metadata
if getattr(settings, 'SHRINK_NEW_MEDIA_METADATA', False):
response = filter_response(metadata, True)
@ -531,4 +584,29 @@ def rename_all_media_for_source(source_id):
for media in Media.objects.filter(source=source):
media.rename_files()
@background(schedule=0)
def wait_for_media_premiere(media_id):
    '''
        Recurring task that watches a premiering video. Once the premiere
        time has passed, the skip flags are cleared so the normal download
        tasks pick the media up; until then the media title and the task's
        verbose name are refreshed with an hours-remaining countdown.
    '''
    # Snapshot the clock once so the "has it premiered?" comparison and the
    # countdown calculation cannot straddle an hour boundary and disagree.
    now = timezone.now()

    def _hours_until(published):
        # Whole hours remaining until `published`, rounded up by one so the
        # countdown never displays zero while the premiere is still ahead.
        remaining = published - now
        return 1 + int((24 * remaining.days) + (remaining.seconds / (60 * 60)))

    try:
        media = Media.objects.get(pk=media_id)
    except Media.DoesNotExist:
        # Media was deleted while we were waiting; nothing to do.
        return
    if media.metadata:
        # Metadata already fetched, the premiere wait is over.
        return
    if media.published < now:
        # Premiere has passed: clear the skip flags so downloads proceed.
        media.manual_skip = False
        media.skip = False
        # start the download tasks
        media.save()
    else:
        # Still premiering: keep it skipped and refresh the countdown text.
        media.manual_skip = True
        media.title = _(f'Premieres in {_hours_until(media.published)} hours')
        task = get_media_premiere_task(str(media.pk))
        if task:
            task.verbose_name = _(f'Waiting for premiere of "{media.key}" '
                                  f'in {_hours_until(media.published)} hours')
            task.save()
        media.save()