move postprocessing-for-training into builtin extensions

This commit is contained in:
Andray
2024-03-12 01:47:23 +04:00
parent 3e0146f9bd
commit 4079b17dd9
5 changed files with 0 additions and 0 deletions

View File

@@ -1,30 +0,0 @@
from modules import scripts_postprocessing, ui_components, deepbooru, shared
import gradio as gr
class ScriptPostprocessingCeption(scripts_postprocessing.ScriptPostprocessing):
    """Extras-tab step that writes an image caption using Deepbooru and/or BLIP.

    Any caption already present on the image is kept and the generated
    captions are appended, joined with ", ".
    """
    name = "Caption"
    order = 4040

    def ui(self):
        """Build the accordion UI; returns the components keyed by process() kwarg names."""
        with ui_components.InputAccordion(False, label="Caption") as enable:
            option = gr.CheckboxGroup(value=["Deepbooru"], choices=["Deepbooru", "BLIP"], show_label=False)

        return {
            "enable": enable,
            "option": option,
        }

    def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option):
        """Append captions from each selected interrogator to pp.caption."""
        if not enable:
            return

        parts = [pp.caption]

        if "Deepbooru" in option:
            parts.append(deepbooru.model.tag(pp.image))

        if "BLIP" in option:
            parts.append(shared.interrogator.interrogate(pp.image.convert("RGB")))

        # drop empty entries (e.g. when there was no pre-existing caption)
        pp.caption = ", ".join(part for part in parts if part)

View File

@@ -1,32 +0,0 @@
from PIL import ImageOps, Image
from modules import scripts_postprocessing, ui_components
import gradio as gr
class ScriptPostprocessingCreateFlippedCopies(scripts_postprocessing.ScriptPostprocessing):
    """Extras-tab step that adds mirrored copies of the image as extra outputs."""
    name = "Create flipped copies"
    order = 4030

    def ui(self):
        """Build the accordion UI; returns the components keyed by process() kwarg names."""
        with ui_components.InputAccordion(False, label="Create flipped copies") as enable:
            with gr.Row():
                option = gr.CheckboxGroup(value=["Horizontal"], choices=["Horizontal", "Vertical", "Both"], show_label=False)

        return {
            "enable": enable,
            "option": option,
        }

    def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option):
        """Append one flipped copy per selected direction to pp.extra_images."""
        if not enable:
            return

        source = pp.image

        if "Horizontal" in option:
            pp.extra_images.append(ImageOps.mirror(source))

        if "Vertical" in option:
            pp.extra_images.append(source.transpose(Image.Transpose.FLIP_TOP_BOTTOM))

        if "Both" in option:
            # flipping on both axes is equivalent to a 180° rotation
            both = source.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
            pp.extra_images.append(both.transpose(Image.Transpose.FLIP_LEFT_RIGHT))

View File

@@ -1,54 +0,0 @@
from modules import scripts_postprocessing, ui_components, errors
import gradio as gr
from modules.textual_inversion import autocrop
class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing):
    """Extras-tab step that crops the image around an automatically detected focal point.

    The focal point is a weighted combination of face detection, entropy and
    edge signals; weights are user-configurable sliders.
    """
    name = "Auto focal point crop"
    order = 4010

    def ui(self):
        """Build the accordion UI; returns the components keyed by process() kwarg names."""
        with ui_components.InputAccordion(False, label="Auto focal point crop") as enable:
            face = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_face_weight")
            entropy = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_entropy_weight")
            edges = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_edges_weight")
            debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")

        return {
            "enable": enable,
            "face_weight": face,
            "entropy_weight": entropy,
            "edges_weight": edges,
            "debug": debug,
        }

    def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, face_weight, entropy_weight, edges_weight, debug):
        """Crop pp.image to the shared target size around the focal point.

        Debug annotation images (if requested) are attached as non-processed
        extra images tagged "focal-crop-debug".
        """
        if not enable:
            return

        # the crop size comes from the shared postprocessing target; without it there is nothing to do
        if not pp.shared.target_width or not pp.shared.target_height:
            return

        try:
            model_path = autocrop.download_and_cache_models()
        except Exception:
            # fall back to the haar-cascade face detector built into autocrop
            model_path = None
            errors.report("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", exc_info=True)

        settings = autocrop.Settings(
            crop_width=pp.shared.target_width,
            crop_height=pp.shared.target_height,
            face_points_weight=face_weight,
            entropy_points_weight=entropy_weight,
            corner_points_weight=edges_weight,
            annotate_image=debug,
            dnn_model_path=model_path,
        )

        cropped, *debug_images = autocrop.crop_image(pp.image, settings)
        pp.image = cropped
        pp.extra_images = [pp.create_copy(img, nametags=["focal-crop-debug"], disable_processing=True) for img in debug_images]

View File

@@ -1,71 +0,0 @@
import math
from modules import scripts_postprocessing, ui_components
import gradio as gr
def split_pic(image, inverse_xy, width, height, overlap_ratio):
    """Resize *image* so its short axis matches the target, then yield overlapping crops.

    The image is scaled so the target "width" axis fits exactly, and the
    remaining length along the other axis is covered by `split_count` crops of
    the target size, overlapping by roughly `overlap_ratio`.

    When `inverse_xy` is true the image is processed with its axes swapped
    (used for landscape images where the split runs horizontally).

    Yields the cropped images in order along the long axis.
    """
    if inverse_xy:
        from_w, from_h = image.height, image.width
        to_w, to_h = height, width
    else:
        from_w, from_h = image.width, image.height
        to_w, to_h = width, height

    # scale so the target "width" axis matches exactly; integer division may
    # round the long axis down to (or near) the target height
    h = from_h * to_w // from_w
    if inverse_xy:
        image = image.resize((h, to_w))
    else:
        image = image.resize((to_w, h))

    split_count = max(math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio))), 1)
    # guard: with a single crop there is no stepping to do, and the original
    # formula (h - to_h) / (split_count - 1) would divide by zero
    y_step = (h - to_h) / (split_count - 1) if split_count > 1 else 0

    for i in range(split_count):
        y = int(y_step * i)
        if inverse_xy:
            splitted = image.crop((y, 0, y + to_h, to_w))
        else:
            splitted = image.crop((0, y, to_w, y + to_h))
        yield splitted
class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostprocessing):
    """Extras-tab step that splits images much longer than the target aspect into tiles.

    The first tile replaces pp.image; remaining tiles become extra images that
    go through the rest of the pipeline.
    """
    name = "Split oversized images"
    order = 4000

    def ui(self):
        """Build the accordion UI; returns the components keyed by process() kwarg names."""
        with ui_components.InputAccordion(False, label="Split oversized images") as enable:
            with gr.Row():
                split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_split_threshold")
                overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="postprocess_overlap_ratio")

        return {
            "enable": enable,
            "split_threshold": split_threshold,
            "overlap_ratio": overlap_ratio,
        }

    def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, split_threshold, overlap_ratio):
        """Split pp.image along its long axis when its aspect ratio deviates enough from the target."""
        if not enable:
            return

        target_w = pp.shared.target_width
        target_h = pp.shared.target_height
        if not target_w or not target_h:
            return

        img = pp.image
        if img.height > img.width:
            # portrait: split vertically
            ratio = (img.width * target_h) / (img.height * target_w)
            inverse_xy = False
        else:
            # landscape (or square): split horizontally
            ratio = (img.height * target_w) / (img.width * target_h)
            inverse_xy = True

        # only split when the image is clearly longer than the target aspect
        if ratio >= 1.0 or ratio > split_threshold:
            return

        first, *rest = split_pic(img, inverse_xy, target_w, target_h, overlap_ratio)
        pp.image = first
        pp.extra_images = [pp.create_copy(piece) for piece in rest]

View File

@@ -1,64 +0,0 @@
from PIL import Image
from modules import scripts_postprocessing, ui_components
import gradio as gr
def center_crop(image: Image, w: int, h: int):
    """Center-crop *image* to the aspect ratio w:h, then resize it to exactly (w, h)."""
    src_w, src_h = image.size

    if src_h / h < src_w / w:
        # source is relatively wider than the target: trim the left and right
        span = w * src_h / h
        region = (src_w - span) / 2, 0, src_w - (src_w - span) / 2, src_h
    else:
        # source is relatively taller: trim the top and bottom
        span = h * src_w / w
        region = 0, (src_h - span) / 2, src_w, src_h - (src_h - span) / 2

    return image.resize((w, h), Image.Resampling.LANCZOS, region)
def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
    """Center-crop *image* to the best (w, h) on a 64-pixel grid, or return None.

    Candidate sizes are multiples of 64 in [mindim, maxdim] whose area lies in
    [minarea, maxarea] and whose aspect-ratio error vs. the source is at most
    *threshold*. The winner maximizes area (tie-break: smaller error) when
    *objective* is 'Maximize area', otherwise minimizes error (tie-break:
    larger area). Returns None when no candidate satisfies the constraints.
    """
    src_w, src_h = image.size

    def aspect_error(w, h):
        # 1 - min(r, 1/r) where r is the ratio between source and candidate aspects
        r = src_w / src_h / (w / h)
        return 1 - (r if r < 1 else 1 / r)

    candidates = [
        (w, h)
        for w in range(mindim, maxdim + 1, 64)
        for h in range(mindim, maxdim + 1, 64)
        if minarea <= w * h <= maxarea and aspect_error(w, h) <= threshold
    ]
    if not candidates:
        return None

    if objective == 'Maximize area':
        best = max(candidates, key=lambda wh: (wh[0] * wh[1], -aspect_error(*wh)))
    else:
        best = max(candidates, key=lambda wh: (-aspect_error(*wh), wh[0] * wh[1]))

    return center_crop(image, *best)
class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPostprocessing):
    """Extras-tab step that center-crops each image to an automatically chosen size.

    The size search is delegated to multicrop_pic(); when no size satisfies
    the configured bounds and error threshold the image is left untouched.
    """
    name = "Auto-sized crop"
    order = 4020

    def ui(self):
        """Build the accordion UI; returns the components keyed by process() kwarg names."""
        with ui_components.InputAccordion(False, label="Auto-sized crop") as enable:
            gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
            with gr.Row():
                mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="postprocess_multicrop_mindim")
                maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="postprocess_multicrop_maxdim")
            with gr.Row():
                minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id="postprocess_multicrop_minarea")
                maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id="postprocess_multicrop_maxarea")
            with gr.Row():
                objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="postprocess_multicrop_objective")
                threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="postprocess_multicrop_threshold")

        return {
            "enable": enable,
            "mindim": mindim,
            "maxdim": maxdim,
            "minarea": minarea,
            "maxarea": maxarea,
            "objective": objective,
            "threshold": threshold,
        }

    def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, mindim, maxdim, minarea, maxarea, objective, threshold):
        """Replace pp.image with its auto-sized center crop when one can be found."""
        if not enable:
            return

        result = multicrop_pic(pp.image, mindim, maxdim, minarea, maxarea, objective, threshold)
        if result is None:
            # keep the original image; the message matches the original behavior of only logging
            print(f"skipped {pp.image.width}x{pp.image.height} image (can't find suitable size within error threshold)")
        else:
            pp.image = result