get attention optimizations to work

AUTOMATIC1111 committed 2023-07-13 09:30:33 +03:00
parent b717eb7e56
commit ac4ccfa136
4 changed files with 12 additions and 8 deletions


@@ -55,3 +55,6 @@ sgm.modules.diffusionmodules.model.print = lambda *args: None
 sgm.modules.diffusionmodules.openaimodel.print = lambda *args: None
 sgm.modules.encoders.modules.print = lambda *args: None
 
+# this gets the code to load the vanilla attention that we override
+sgm.modules.attention.SDP_IS_AVAILABLE = True
+sgm.modules.attention.XFORMERS_IS_AVAILABLE = False
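
For context, a minimal sketch of the mechanism this change relies on: sgm selects its attention implementation from module-level availability flags when the model is built, so forcing SDP_IS_AVAILABLE = True and XFORMERS_IS_AVAILABLE = False steers it to the vanilla attention class, whose forward the webui then monkey-patches with its own optimized version. The flag names come from the diff above; the dispatch function and the names build_attention and optimized_forward below are illustrative stand-ins, not the actual sgm or webui source.

    # Minimal sketch (not the real sgm/webui code) of why flipping the flags works.

    # --- stand-in for the library module (sgm.modules.attention) ---
    SDP_IS_AVAILABLE = True        # normally probed from the torch version at import time
    XFORMERS_IS_AVAILABLE = True   # normally probed by trying to import xformers

    class CrossAttention:
        # vanilla softmax attention: the implementation the webui knows how to patch
        def forward(self, x):
            return "vanilla attention"

    class MemoryEfficientCrossAttention:
        # xformers-backed attention: a patch applied to CrossAttention never reaches this
        def forward(self, x):
            return "xformers attention"

    def build_attention():
        # the backend is chosen from the module-level flags at model-build time,
        # so overwriting the flags *before* this runs changes which class is used
        return MemoryEfficientCrossAttention() if XFORMERS_IS_AVAILABLE else CrossAttention()

    # --- stand-in for the webui side ---
    XFORMERS_IS_AVAILABLE = False  # step 1: make the library fall back to vanilla attention
    SDP_IS_AVAILABLE = True

    def optimized_forward(self, x):
        return "webui optimized attention"

    CrossAttention.forward = optimized_forward  # step 2: override the vanilla forward

    print(build_attention().forward(None))      # -> webui optimized attention

In the real code the flags live in sgm.modules.attention and the override happens in the webui's hijack code, but the ordering constraint is the same: the flags must be flipped before the SDXL model is constructed, otherwise the xformers path is baked in and the webui's patched forward is never called.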