Эх сурвалжийг харах

enable cond cache by default

AUTOMATIC1111 2 жил өмнө
parent
commit
57e8a11d17

+ 18 - 13
modules/processing.py

@@ -295,7 +295,7 @@ class StableDiffusionProcessing:
         self.sampler = None
         self.c = None
         self.uc = None
-        if not opts.experimental_persistent_cond_cache:
+        if not opts.persistent_cond_cache:
             StableDiffusionProcessing.cached_c = [None, None]
             StableDiffusionProcessing.cached_uc = [None, None]
 
 
@@ -319,6 +319,21 @@ class StableDiffusionProcessing:
         self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
         self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]
 
 
+    def cached_params(self, required_prompts, steps, extra_network_data):
+        """Returns parameters that invalidate the cond cache if changed"""
+
+        return (
+            required_prompts,
+            steps,
+            opts.CLIP_stop_at_last_layers,
+            shared.sd_model.sd_checkpoint_info,
+            extra_network_data,
+            opts.sdxl_crop_left,
+            opts.sdxl_crop_top,
+            self.width,
+            self.height,
+        )
+
     def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data):
         """
         Returns the result of calling function(shared.sd_model, required_prompts, steps)
@@ -332,17 +347,7 @@ class StableDiffusionProcessing:
         caches is a list with items described above.
         """
 
 
-        cached_params = (
-            required_prompts,
-            steps,
-            opts.CLIP_stop_at_last_layers,
-            shared.sd_model.sd_checkpoint_info,
-            extra_network_data,
-            opts.sdxl_crop_left,
-            opts.sdxl_crop_top,
-            self.width,
-            self.height,
-        )
+        cached_params = self.cached_params(required_prompts, steps, extra_network_data)
 
 
         for cache in caches:
             if cache[0] is not None and cached_params == cache[0]:
@@ -1184,7 +1189,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         super().close()
         self.hr_c = None
         self.hr_uc = None
-        if not opts.experimental_persistent_cond_cache:
+        if not opts.persistent_cond_cache:
             StableDiffusionProcessingTxt2Img.cached_hr_uc = [None, None]
             StableDiffusionProcessingTxt2Img.cached_hr_c = [None, None]
 
 

+ 1 - 1
modules/shared.py

@@ -506,7 +506,7 @@ options_templates.update(options_section(('optimizations', "Optimizations"), {
     "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
     "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
     "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
     "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
     "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
     "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
-    "experimental_persistent_cond_cache": OptionInfo(False, "persistent cond cache").info("Experimental, keep cond caches across jobs, reduce overhead."),
+    "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"),
 }))
 
 
 options_templates.update(options_section(('compatibility', "Compatibility"), {