fix memory leak when generation fails

Author: AUTOMATIC1111
Date: 2023-07-31 22:01:53 +03:00
parent 0d577aba26
commit c10633f93a
3 changed files with 10 additions and 3 deletions


@@ -3,7 +3,7 @@ import html
 import threading
 import time
 
-from modules import shared, progress, errors
+from modules import shared, progress, errors, devices
 
 queue_lock = threading.Lock()
 
@@ -75,6 +75,8 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
             error_message = f'{type(e).__name__}: {e}'
             res = extra_outputs_array + [f"<div class='error'>{html.escape(error_message)}</div>"]
 
+            devices.torch_gc()
+
         shared.state.skipped = False
         shared.state.interrupted = False
         shared.state.job_count = 0
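
The added devices.torch_gc() call is what closes the leak: when generation raises, GPU memory held by the failed job is released in the error path instead of lingering until the next run. As a rough sketch of what a torch_gc-style helper typically does (the function and comments below are illustrative assumptions, not the webui's actual modules/devices implementation):

import gc

import torch


def torch_gc_sketch():
    # Illustrative stand-in for a torch_gc-style cleanup helper.
    gc.collect()  # drop Python-level references so CUDA tensors become collectable
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # return cached allocator blocks to the driver
        torch.cuda.ipc_collect()  # clean up memory held by dead CUDA IPC handles

Running this kind of cleanup from the except branch of wrap_gradio_call means a failed generation gives its VRAM back right away rather than keeping it cached until a later job succeeds.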