Instance of CUDA out of memory on a low-res batch, even with --opt-split-attention-v1 (found cause) #255

AUTOMATIC
2022-09-12 18:40:06 +03:00
parent 535b25ad26
commit 843b2b64fc
4 changed files with 31 additions and 21 deletions

@@ -5,7 +5,7 @@ import traceback
 import cv2
 import torch
-from modules import shared
+from modules import shared, devices
 from modules.paths import script_path
 import modules.shared
 import modules.face_restoration
@@ -51,6 +51,7 @@ def setup_codeformer():
             def create_models(self):
                 if self.net is not None and self.face_helper is not None:
+                    self.net.to(shared.device)
                     return self.net, self.face_helper
                 net = net_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(shared.device)
@@ -61,9 +62,9 @@ def setup_codeformer():
                 face_helper = FaceRestoreHelper(1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=shared.device)
-                if not cmd_opts.unload_gfpgan:
-                    self.net = net
-                    self.face_helper = face_helper
+                self.net = net
+                self.face_helper = face_helper
+                self.net.to(shared.device)
                 return net, face_helper
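
The two hunks above make create_models() cache the CodeFormer network and face helper on the instance and move the network back to shared.device when the cached copy is reused, since a previous restore() call may have parked it on the CPU. A minimal sketch of that lazy-cache-and-reattach pattern, assuming plain PyTorch and an illustrative FaceRestorer class (the names are not taken from the repository):

```python
import torch
import torch.nn as nn

# Illustrative device selection; the webui keeps this in shared.device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class FaceRestorer:
    def __init__(self):
        self.net = None  # built lazily on first use

    def create_models(self):
        if self.net is not None:
            # The cached model may have been moved to the CPU after the
            # previous restoration, so reattach it to the active device.
            self.net.to(device)
            return self.net

        # Stand-in for constructing the real CodeFormer network.
        self.net = nn.Identity().to(device)
        return self.net
```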
@@ -72,20 +73,20 @@ def setup_codeformer():
                 original_resolution = np_image.shape[0:2]
-                net, face_helper = self.create_models()
-                face_helper.clean_all()
-                face_helper.read_image(np_image)
-                face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
-                face_helper.align_warp_face()
+                self.create_models()
+                self.face_helper.clean_all()
+                self.face_helper.read_image(np_image)
+                self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
+                self.face_helper.align_warp_face()
-                for idx, cropped_face in enumerate(face_helper.cropped_faces):
+                for idx, cropped_face in enumerate(self.face_helper.cropped_faces):
                     cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
                     normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
                     cropped_face_t = cropped_face_t.unsqueeze(0).to(shared.device)
                     try:
                         with torch.no_grad():
-                            output = net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0]
+                            output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0]
                             restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
                         del output
                         torch.cuda.empty_cache()
@@ -94,16 +95,19 @@ def setup_codeformer():
                         restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
                     restored_face = restored_face.astype('uint8')
-                    face_helper.add_restored_face(restored_face)
+                    self.face_helper.add_restored_face(restored_face)
-                face_helper.get_inverse_affine(None)
+                self.face_helper.get_inverse_affine(None)
-                restored_img = face_helper.paste_faces_to_input_image()
+                restored_img = self.face_helper.paste_faces_to_input_image()
                 restored_img = restored_img[:, :, ::-1]
                 if original_resolution != restored_img.shape[0:2]:
                     restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR)
+                if shared.opts.face_restoration_unload:
+                    self.net.to(devices.cpu)
                 return restored_img
         global have_codeformer
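
The last hunk is the actual fix for the out-of-memory report: when the face_restoration_unload option is enabled, the network is moved to devices.cpu once restoration finishes, so its weights no longer sit in VRAM during the next generation batch. A minimal, self-contained sketch of that unload step, assuming plain PyTorch (the class and flag names below are illustrative, not the webui's):

```python
import torch
import torch.nn as nn


class Restorer:
    """Toy stand-in for the CodeFormer wrapper; only the device handling matters."""

    def __init__(self, unload_after_use: bool = True):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.net = nn.Linear(16, 16)  # stand-in for the real restoration network
        self.unload_after_use = unload_after_use

    def restore(self, x: torch.Tensor) -> torch.Tensor:
        self.net.to(self.device)  # ensure the model is on the active device for inference
        with torch.no_grad():
            out = self.net(x.to(self.device)).cpu()
        if self.unload_after_use:
            self.net.to(torch.device("cpu"))  # release the VRAM held by the weights
            if torch.cuda.is_available():
                torch.cuda.empty_cache()  # hand cached blocks back to the driver
        return out


# Usage: Restorer().restore(torch.randn(1, 16))
```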