# processing.py

import json
import logging
import math
import os
import sys
import hashlib

import torch
import numpy as np
from PIL import Image, ImageOps
import random
import cv2
from skimage import exposure
from typing import Any, Dict, List

import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors
from modules.sd_hijack import model_hijack
from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.paths as paths
import modules.face_restoration
import modules.images as images
import modules.styles
import modules.sd_models as sd_models
import modules.sd_vae as sd_vae
from ldm.data.util import AddMiDaS
from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion

from einops import repeat, rearrange
from blendmodes.blend import blendLayers, BlendType

# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8


def setup_color_correction(image):
    logging.info("Calibrating color correction.")
    correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
    return correction_target


def apply_color_correction(correction, original_image):
    logging.info("Applying color correction.")

    image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
        cv2.cvtColor(
            np.asarray(original_image),
            cv2.COLOR_RGB2LAB
        ),
        correction,
        channel_axis=2
    ), cv2.COLOR_LAB2RGB).astype("uint8"))

    image = blendLayers(image, original_image, BlendType.LUMINOSITY)

    return image
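
# Hedged usage sketch (not part of the original module): how the two helpers
# above pair up in img2img color correction. `reference_img` and
# `generated_img` are hypothetical PIL.Image values standing in for the
# init image and the sampler output.
#
#     correction = setup_color_correction(reference_img)            # LAB histogram target
#     corrected = apply_color_correction(correction, generated_img)
#
# match_histograms pulls the generated image's LAB distribution toward the
# reference; the final LUMINOSITY blend then reapplies the generated image's
# own luminance, so the correction mostly shifts color rather than tone.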


def apply_overlay(image, paste_loc, index, overlays):
    if overlays is None or index >= len(overlays):
        return image

    overlay = overlays[index]

    if paste_loc is not None:
        x, y, w, h = paste_loc
        base_image = Image.new('RGBA', (overlay.width, overlay.height))
        image = images.resize_image(1, image, w, h)
        base_image.paste(image, (x, y))
        image = base_image

    image = image.convert('RGBA')
    image.alpha_composite(overlay)
    image = image.convert('RGB')

    return image


def txt2img_image_conditioning(sd_model, x, width, height):
    if sd_model.model.conditioning_key in {'hybrid', 'concat'}:  # Inpainting models
        # The "masked-image" in this case will just be all zeros since the entire image is masked.
        image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
        image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))

        # Add the fake full 1s mask to the first dimension.
        image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
        image_conditioning = image_conditioning.to(x.dtype)

        return image_conditioning

    elif sd_model.model.conditioning_key == "crossattn-adm":  # UnCLIP models
        return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)

    else:
        # Dummy zero conditioning if we're not using inpainting or unclip models.
        # Still takes up a bit of memory, but no encoder call.
        # Pretty sure we can just make this a 1x1 image since it's not going to be used besides its batch size.
        return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
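
# Shape note (illustrative, assuming the VAE's usual 8x downscale): for a batch
# of B latents, the branches above return
#   inpainting ('hybrid'/'concat'):  [B, 5, height//8, width//8] -- an all-ones
#       mask channel concatenated onto the 4-channel encoding of a black image
#   unCLIP ('crossattn-adm'):        [B, 2 * noise_augmentor.time_embed.dim] zeros
#   everything else:                 [B, 5, 1, 1] zeros, kept only for its batch size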


class StableDiffusionProcessing:
    """
    The first set of parameters: sd_model -> do_not_reload_embeddings represents the minimum required to create a StableDiffusionProcessing
    """
    cached_uc = [None, None]
    cached_c = [None, None]

    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
        if sampler_index is not None:
            print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)

        self.outpath_samples: str = outpath_samples
        self.outpath_grids: str = outpath_grids
        self.prompt: str = prompt
        self.prompt_for_display: str = None
        self.negative_prompt: str = (negative_prompt or "")
        self.styles: list = styles or []
        self.seed: int = seed
        self.subseed: int = subseed
        self.subseed_strength: float = subseed_strength
        self.seed_resize_from_h: int = seed_resize_from_h
        self.seed_resize_from_w: int = seed_resize_from_w
        self.sampler_name: str = sampler_name
        self.batch_size: int = batch_size
        self.n_iter: int = n_iter
        self.steps: int = steps
        self.cfg_scale: float = cfg_scale
        self.width: int = width
        self.height: int = height
        self.restore_faces: bool = restore_faces
        self.tiling: bool = tiling
        self.do_not_save_samples: bool = do_not_save_samples
        self.do_not_save_grid: bool = do_not_save_grid
        self.extra_generation_params: dict = extra_generation_params or {}
        self.overlay_images = overlay_images
        self.eta = eta
        self.do_not_reload_embeddings = do_not_reload_embeddings
        self.paste_to = None
        self.color_corrections = None
        self.denoising_strength: float = denoising_strength
        self.sampler_noise_scheduler_override = None
        self.ddim_discretize = ddim_discretize or opts.ddim_discretize
        self.s_min_uncond = s_min_uncond or opts.s_min_uncond
        self.s_churn = s_churn or opts.s_churn
        self.s_tmin = s_tmin or opts.s_tmin
        self.s_tmax = s_tmax or float('inf')  # not representable as a standard ui option
        self.s_noise = s_noise or opts.s_noise
        self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
        self.override_settings_restore_afterwards = override_settings_restore_afterwards
        self.is_using_inpainting_conditioning = False
        self.disable_extra_networks = False
        self.token_merging_ratio = 0
        self.token_merging_ratio_hr = 0

        if not seed_enable_extras:
            self.subseed = -1
            self.subseed_strength = 0
            self.seed_resize_from_h = 0
            self.seed_resize_from_w = 0

        self.scripts = None
        self.script_args = script_args
        self.all_prompts = None
        self.all_negative_prompts = None
        self.all_seeds = None
        self.all_subseeds = None
        self.iteration = 0
        self.is_hr_pass = False
        self.sampler = None

        self.prompts = None
        self.negative_prompts = None
        self.extra_network_data = None
        self.seeds = None
        self.subseeds = None

        self.step_multiplier = 1
        self.cached_uc = StableDiffusionProcessing.cached_uc
        self.cached_c = StableDiffusionProcessing.cached_c
        self.uc = None
        self.c = None

        self.user = None

    @property
    def sd_model(self):
        return shared.sd_model

    def txt2img_image_conditioning(self, x, width=None, height=None):
        self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}

        return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height)
    def depth2img_image_conditioning(self, source_image):
        # Use the AddMiDaS helper to format our source image to suit the MiDaS model
        transformer = AddMiDaS(model_type="dpt_hybrid")
        transformed = transformer({"jpg": rearrange(source_image[0], "c h w -> h w c")})
        midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
        midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)

        conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
        conditioning = torch.nn.functional.interpolate(
            self.sd_model.depth_model(midas_in),
            size=conditioning_image.shape[2:],
            mode="bicubic",
            align_corners=False,
        )

        (depth_min, depth_max) = torch.aminmax(conditioning)
        conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
        return conditioning

    def edit_image_conditioning(self, source_image):
        conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))

        return conditioning_image

    def unclip_image_conditioning(self, source_image):
        c_adm = self.sd_model.embedder(source_image)
        if self.sd_model.noise_augmentor is not None:
            noise_level = 0  # TODO: Allow other noise levels?
            c_adm, noise_level_emb = self.sd_model.noise_augmentor(c_adm, noise_level=repeat(torch.tensor([noise_level]).to(c_adm.device), '1 -> b', b=c_adm.shape[0]))
            c_adm = torch.cat((c_adm, noise_level_emb), 1)
        return c_adm

    def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
        self.is_using_inpainting_conditioning = True

        # Handle the different mask inputs
        if image_mask is not None:
            if torch.is_tensor(image_mask):
                conditioning_mask = image_mask
            else:
                conditioning_mask = np.array(image_mask.convert("L"))
                conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
                conditioning_mask = torch.from_numpy(conditioning_mask[None, None])

                # Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
                conditioning_mask = torch.round(conditioning_mask)
        else:
            conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])

        # Create another latent image, this time with a masked version of the original input.
        # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
        conditioning_mask = conditioning_mask.to(device=source_image.device, dtype=source_image.dtype)
        conditioning_image = torch.lerp(
            source_image,
            source_image * (1.0 - conditioning_mask),
            getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
        )

        # Encode the new masked image using first stage of network.
        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))

        # Create the concatenated conditioning tensor to be fed to `c_concat`
        conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
        conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
        image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
        image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype)

        return image_conditioning

    def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
        source_image = devices.cond_cast_float(source_image)

        # HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
        # identify itself with a field common to all models. The conditioning_key is also hybrid.
        if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
            return self.depth2img_image_conditioning(source_image)

        if self.sd_model.cond_stage_key == "edit":
            return self.edit_image_conditioning(source_image)

        if self.sampler.conditioning_key in {'hybrid', 'concat'}:
            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)

        if self.sampler.conditioning_key == "crossattn-adm":
            return self.unclip_image_conditioning(source_image)

        # Dummy zero conditioning if we're not using inpainting or depth model.
        return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
    def init(self, all_prompts, all_seeds, all_subseeds):
        pass

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        raise NotImplementedError()

    def close(self):
        self.sampler = None
        self.c = None
        self.uc = None
        if not opts.experimental_persistent_cond_cache:
            StableDiffusionProcessing.cached_c = [None, None]
            StableDiffusionProcessing.cached_uc = [None, None]

    def get_token_merging_ratio(self, for_hr=False):
        if for_hr:
            return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio

        return self.token_merging_ratio or opts.token_merging_ratio

    def setup_prompts(self):
        if type(self.prompt) == list:
            self.all_prompts = self.prompt
        else:
            self.all_prompts = self.batch_size * self.n_iter * [self.prompt]

        if type(self.negative_prompt) == list:
            self.all_negative_prompts = self.negative_prompt
        else:
            self.all_negative_prompts = self.batch_size * self.n_iter * [self.negative_prompt]

        self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
        self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]

    def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data):
        """
        Returns the result of calling function(shared.sd_model, required_prompts, steps)
        using a cache to store the result if the same arguments have been used before.

        cache is an array containing two elements. The first element is a tuple
        representing the previously used arguments, or None if no arguments
        have been used before. The second element is where the previously
        computed result is stored.

        caches is a list with items described above.
        """

        cached_params = (
            required_prompts,
            steps,
            opts.CLIP_stop_at_last_layers,
            shared.sd_model.sd_checkpoint_info,
            extra_network_data,
            opts.sdxl_crop_left,
            opts.sdxl_crop_top,
            self.width,
            self.height,
        )

        for cache in caches:
            if cache[0] is not None and cached_params == cache[0]:
                return cache[1]

        cache = caches[0]

        with devices.autocast():
            cache[1] = function(shared.sd_model, required_prompts, steps)

        cache[0] = cached_params
        return cache[1]

    def setup_conds(self):
        prompts = prompt_parser.SdConditioning(self.prompts, width=self.width, height=self.height)
        negative_prompts = prompt_parser.SdConditioning(self.negative_prompts, width=self.width, height=self.height, is_negative_prompt=True)

        sampler_config = sd_samplers.find_sampler_config(self.sampler_name)
        self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1
        self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data)
        self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, self.steps * self.step_multiplier, [self.cached_c], self.extra_network_data)

    def parse_extra_network_prompts(self):
        self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts)


class Processed:
    def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
        self.images = images_list
        self.prompt = p.prompt
        self.negative_prompt = p.negative_prompt
        self.seed = seed
        self.subseed = subseed
        self.subseed_strength = p.subseed_strength
        self.info = info
        self.comments = comments
        self.width = p.width
        self.height = p.height
        self.sampler_name = p.sampler_name
        self.cfg_scale = p.cfg_scale
        self.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
        self.steps = p.steps
        self.batch_size = p.batch_size
        self.restore_faces = p.restore_faces
        self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
        self.sd_model_hash = shared.sd_model.sd_model_hash
        self.seed_resize_from_w = p.seed_resize_from_w
        self.seed_resize_from_h = p.seed_resize_from_h
        self.denoising_strength = getattr(p, 'denoising_strength', None)
        self.extra_generation_params = p.extra_generation_params
        self.index_of_first_image = index_of_first_image
        self.styles = p.styles
        self.job_timestamp = state.job_timestamp
        self.clip_skip = opts.CLIP_stop_at_last_layers
        self.token_merging_ratio = p.token_merging_ratio
        self.token_merging_ratio_hr = p.token_merging_ratio_hr

        self.eta = p.eta
        self.ddim_discretize = p.ddim_discretize
        self.s_churn = p.s_churn
        self.s_tmin = p.s_tmin
        self.s_tmax = p.s_tmax
        self.s_noise = p.s_noise
        self.s_min_uncond = p.s_min_uncond
        self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
        self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
        self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
        self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
        self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
        self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning

        self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
        self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
        self.all_seeds = all_seeds or p.all_seeds or [self.seed]
        self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
        self.infotexts = infotexts or [info]

    def js(self):
        obj = {
            "prompt": self.all_prompts[0],
            "all_prompts": self.all_prompts,
            "negative_prompt": self.all_negative_prompts[0],
            "all_negative_prompts": self.all_negative_prompts,
            "seed": self.seed,
            "all_seeds": self.all_seeds,
            "subseed": self.subseed,
            "all_subseeds": self.all_subseeds,
            "subseed_strength": self.subseed_strength,
            "width": self.width,
            "height": self.height,
            "sampler_name": self.sampler_name,
            "cfg_scale": self.cfg_scale,
            "steps": self.steps,
            "batch_size": self.batch_size,
            "restore_faces": self.restore_faces,
            "face_restoration_model": self.face_restoration_model,
            "sd_model_hash": self.sd_model_hash,
            "seed_resize_from_w": self.seed_resize_from_w,
            "seed_resize_from_h": self.seed_resize_from_h,
            "denoising_strength": self.denoising_strength,
            "extra_generation_params": self.extra_generation_params,
            "index_of_first_image": self.index_of_first_image,
            "infotexts": self.infotexts,
            "styles": self.styles,
            "job_timestamp": self.job_timestamp,
            "clip_skip": self.clip_skip,
            "is_using_inpainting_conditioning": self.is_using_inpainting_conditioning,
        }

        return json.dumps(obj)

    def infotext(self, p: StableDiffusionProcessing, index):
        return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)

    def get_token_merging_ratio(self, for_hr=False):
        return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio


# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
    low_norm = low/torch.norm(low, dim=1, keepdim=True)
    high_norm = high/torch.norm(high, dim=1, keepdim=True)
    dot = (low_norm*high_norm).sum(1)

    if dot.mean() > 0.9995:
        return low * val + high * (1 - val)

    omega = torch.acos(dot)
    so = torch.sin(omega)
    res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
    return res
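
# Illustrative note (not from the original file): with subseed_strength 0.3,
#
#     noise = devices.randn(seed, shape)        # base noise
#     subnoise = devices.randn(subseed, shape)  # variation noise
#     mixed = slerp(0.3, noise, subnoise)
#
# moves 30% of the way along the arc from `noise` toward `subnoise`, keeping
# the result's norm close to that of a Gaussian sample. The dot > 0.9995
# branch avoids a numerically unstable acos for nearly parallel inputs; note
# that its lerp weights `low` by val (opposite of the slerp path at val=0),
# a quirk left untouched here since changing it would change generated seeds.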


def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
    eta_noise_seed_delta = opts.eta_noise_seed_delta or 0
    xs = []

    # if we have multiple seeds, this means we are working with batch size>1; this then
    # enables the generation of additional tensors with noise that the sampler will use during its processing.
    # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
    # produce the same images as with two batches [100], [101].
    if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0):
        sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
    else:
        sampler_noises = None

    for i, seed in enumerate(seeds):
        noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)

        subnoise = None
        if subseeds is not None and subseed_strength != 0:
            subseed = 0 if i >= len(subseeds) else subseeds[i]
            subnoise = devices.randn(subseed, noise_shape)

        # randn results depend on device; gpu and cpu get different results for same seed;
        # the way I see it, it's better to do this on CPU, so that everyone gets same result;
        # but the original script had it like this, so I do not dare change it for now because
        # it will break everyone's seeds.
        noise = devices.randn(seed, noise_shape)

        if subnoise is not None:
            noise = slerp(subseed_strength, noise, subnoise)

        if noise_shape != shape:
            x = devices.randn(seed, shape)
            dx = (shape[2] - noise_shape[2]) // 2
            dy = (shape[1] - noise_shape[1]) // 2
            w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
            h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
            tx = 0 if dx < 0 else dx
            ty = 0 if dy < 0 else dy
            dx = max(-dx, 0)
            dy = max(-dy, 0)

            x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
            noise = x

        if sampler_noises is not None:
            cnt = p.sampler.number_of_needed_noises(p)

            if eta_noise_seed_delta > 0:
                devices.manual_seed(seed + eta_noise_seed_delta)

            for j in range(cnt):
                sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))

        xs.append(noise)

    if sampler_noises is not None:
        p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]

    x = torch.stack(xs).to(shared.device)
    return x
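
# Illustrative note (not from the original file): each image's initial latent
# always comes from its own seed, so
#
#     create_random_tensors([4, 64, 64], seeds=[100, 101])[0]
#
# matches create_random_tensors([4, 64, 64], seeds=[100])[0]. The
# sampler_noises machinery extends the same guarantee to the extra noise that
# ancestral/SDE samplers draw mid-run, which is what lets a batch with seeds
# [100, 101] reproduce two single-image runs [100] and [101].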


def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
    samples = []

    for i in range(batch.shape[0]):
        sample = decode_first_stage(model, batch[i:i + 1])[0]

        if check_for_nans:
            try:
                devices.test_for_nans(sample, "vae")
            except devices.NansException as e:
                if devices.dtype_vae == torch.float32 or not shared.opts.auto_vae_precision:
                    raise e

                errors.print_error_explanation(
                    "A tensor with all NaNs was produced in VAE.\n"
                    "Web UI will now convert VAE into 32-bit float and retry.\n"
                    "To disable this behavior, disable the 'Automatically revert VAE to 32-bit floats' setting.\n"
                    "To always start with 32-bit VAE, use --no-half-vae commandline flag."
                )

                devices.dtype_vae = torch.float32
                model.first_stage_model.to(devices.dtype_vae)
                batch = batch.to(devices.dtype_vae)

                sample = decode_first_stage(model, batch[i:i + 1])[0]

        if target_device is not None:
            sample = sample.to(target_device)

        samples.append(sample)

    return samples


def get_fixed_seed(seed):
    if seed is None or seed == '' or seed == -1:
        return int(random.randrange(4294967294))

    return seed


def fix_seed(p):
    p.seed = get_fixed_seed(p.seed)
    p.subseed = get_fixed_seed(p.subseed)
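
# For example, get_fixed_seed(-1), get_fixed_seed(None) and get_fixed_seed('')
# each return a fresh random int in [0, 4294967293] so that the seed actually
# used can be recorded in infotexts, while get_fixed_seed(1234) returns 1234
# unchanged.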


def program_version():
    import launch

    res = launch.git_tag()
    if res == "<none>":
        res = None

    return res


def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
    if index is None:
        index = position_in_batch + iteration * p.batch_size

    if all_negative_prompts is None:
        all_negative_prompts = p.all_negative_prompts

    clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
    enable_hr = getattr(p, 'enable_hr', False)
    token_merging_ratio = p.get_token_merging_ratio()
    token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True)

    uses_ensd = opts.eta_noise_seed_delta != 0
    if uses_ensd:
        uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p)

    generation_params = {
        "Steps": p.steps,
        "Sampler": p.sampler_name,
        "CFG scale": p.cfg_scale,
        "Image CFG scale": getattr(p, 'image_cfg_scale', None),
        "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
        "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
        "Size": f"{p.width}x{p.height}",
        "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
        "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
        "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
        "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
        "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
        "Denoising strength": getattr(p, 'denoising_strength', None),
        "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
        "Clip skip": None if clip_skip <= 1 else clip_skip,
        "ENSD": opts.eta_noise_seed_delta if uses_ensd else None,
        "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio,
        "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr,
        "Init image hash": getattr(p, 'init_img_hash', None),
        "RNG": opts.randn_source if opts.randn_source != "GPU" and opts.randn_source != "NV" else None,
        "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
        **p.extra_generation_params,
        "Version": program_version() if opts.add_version_to_infotext else None,
        "User": p.user if opts.add_user_name_to_info else None,
    }

    generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])

    prompt_text = p.prompt if use_main_prompt else all_prompts[index]
    negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""

    return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
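
# The returned infotext is the plain-text parameters block stored in image
# metadata and shown in the UI; e.g., with illustrative values:
#
#     a photo of a cat
#     Negative prompt: blurry
#     Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 1234, Size: 512x512
#
# None-valued entries in generation_params are filtered out, so only settings
# with something to report appear on the final line.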


def process_images(p: StableDiffusionProcessing) -> Processed:
    if p.scripts is not None:
        p.scripts.before_process(p)

    stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}

    try:
        # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint
        if sd_models.checkpoint_aliases.get(p.override_settings.get('sd_model_checkpoint')) is None:
            p.override_settings.pop('sd_model_checkpoint', None)
            sd_models.reload_model_weights()

        for k, v in p.override_settings.items():
            setattr(opts, k, v)

            if k == 'sd_model_checkpoint':
                sd_models.reload_model_weights()

            if k == 'sd_vae':
                sd_vae.reload_vae_weights()

        sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())

        res = process_images_inner(p)

    finally:
        sd_models.apply_token_merging(p.sd_model, 0)

        # restore opts to original state
        if p.override_settings_restore_afterwards:
            for k, v in stored_opts.items():
                setattr(opts, k, v)

                if k == 'sd_vae':
                    sd_vae.reload_vae_weights()

    return res
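
# Hedged usage sketch (hypothetical values): p.override_settings carries opts
# keys to apply for this job only, e.g.
#
#     p.override_settings = {"CLIP_stop_at_last_layers": 2, "sd_vae": "Automatic"}
#
# Each key is set on opts before process_images_inner runs; 'sd_model_checkpoint'
# and 'sd_vae' additionally trigger weight reloads, and stored_opts restores the
# original values afterwards when override_settings_restore_afterwards is true.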


def process_images_inner(p: StableDiffusionProcessing) -> Processed:
    """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""

    if type(p.prompt) == list:
        assert(len(p.prompt) > 0)
    else:
        assert p.prompt is not None

    devices.torch_gc()

    seed = get_fixed_seed(p.seed)
    subseed = get_fixed_seed(p.subseed)

    modules.sd_hijack.model_hijack.apply_circular(p.tiling)
    modules.sd_hijack.model_hijack.clear_comments()

    comments = {}

    p.setup_prompts()

    if type(seed) == list:
        p.all_seeds = seed
    else:
        p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]

    if type(subseed) == list:
        p.all_subseeds = subseed
    else:
        p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]

    if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
        model_hijack.embedding_db.load_textual_inversion_embeddings()

    if p.scripts is not None:
        p.scripts.process(p)

    infotexts = []
    output_images = []

    with torch.no_grad(), p.sd_model.ema_scope():
        with devices.autocast():
            p.init(p.all_prompts, p.all_seeds, p.all_subseeds)

            # for OSX, loading the model during sampling changes the generated picture, so it is loaded here
            if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
                sd_vae_approx.model()

            sd_unet.apply_unet()

        if state.job_count == -1:
            state.job_count = p.n_iter

        for n in range(p.n_iter):
            p.iteration = n

            if state.skipped:
                state.skipped = False

            if state.interrupted:
                break

            p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
            p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
            p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
            p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]

            if p.scripts is not None:
                p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)

            if len(p.prompts) == 0:
                break

            p.parse_extra_network_prompts()

            if not p.disable_extra_networks:
                with devices.autocast():
                    extra_networks.activate(p, p.extra_network_data)

            if p.scripts is not None:
                p.scripts.process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)

            # params.txt should be saved after scripts.process_batch, since the
            # infotext could be modified by that callback
            # Example: a wildcard processed by process_batch sets an extra model
            # strength, which is saved as "Model Strength: 1.0" in the infotext
            if n == 0:
                with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
                    processed = Processed(p, [], p.seed, "")
                    file.write(processed.infotext(p, 0))

            p.setup_conds()

            for comment in model_hijack.comments:
                comments[comment] = 1

            p.extra_generation_params.update(model_hijack.extra_generation_params)

            if p.n_iter > 1:
                shared.state.job = f"Batch {n+1} out of {p.n_iter}"

            with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
                samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)

            p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
            x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
            x_samples_ddim = torch.stack(x_samples_ddim).float()
            x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

            del samples_ddim

            if lowvram.is_enabled(shared.sd_model):
                lowvram.send_everything_to_cpu()

            devices.torch_gc()

            if p.scripts is not None:
                p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

                p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
                p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]

                batch_params = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
                p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
                x_samples_ddim = batch_params.images

            def infotext(index=0, use_main_prompt=False):
                return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)

            for i, x_sample in enumerate(x_samples_ddim):
                p.batch_index = i

                x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
                x_sample = x_sample.astype(np.uint8)

                if p.restore_faces:
                    if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")

                    devices.torch_gc()

                    x_sample = modules.face_restoration.restore_faces(x_sample)
                    devices.torch_gc()

                image = Image.fromarray(x_sample)

                if p.scripts is not None:
                    pp = scripts.PostprocessImageArgs(image)
                    p.scripts.postprocess_image(p, pp)
                    image = pp.image

                if p.color_corrections is not None and i < len(p.color_corrections):
                    if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
                        image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
                        images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
                    image = apply_color_correction(p.color_corrections[i], image)

                image = apply_overlay(image, p.paste_to, i, p.overlay_images)

                if opts.samples_save and not p.do_not_save_samples:
                    images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)

                text = infotext(i)
                infotexts.append(text)
                if opts.enable_pnginfo:
                    image.info["parameters"] = text
                output_images.append(image)

                if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]):
                    image_mask = p.mask_for_overlay.convert('RGB')
                    image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')

                    if opts.save_mask:
                        images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")

                    if opts.save_mask_composite:
                        images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")

                    if opts.return_mask:
                        output_images.append(image_mask)

                    if opts.return_mask_composite:
                        output_images.append(image_mask_composite)

            del x_samples_ddim

            devices.torch_gc()

            state.nextjob()

        p.color_corrections = None

        index_of_first_image = 0
        unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
        if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
            grid = images.image_grid(output_images, p.batch_size)

            if opts.return_grid:
                text = infotext(use_main_prompt=True)
                infotexts.insert(0, text)
                if opts.enable_pnginfo:
                    grid.info["parameters"] = text
                output_images.insert(0, grid)
                index_of_first_image = 1

            if opts.grid_save:
                images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True)

    if not p.disable_extra_networks and p.extra_network_data:
        extra_networks.deactivate(p, p.extra_network_data)

    devices.torch_gc()

    res = Processed(
        p,
        images_list=output_images,
        seed=p.all_seeds[0],
        info=infotexts[0],
        comments="".join(f"{comment}\n" for comment in comments),
        subseed=p.all_subseeds[0],
        index_of_first_image=index_of_first_image,
        infotexts=infotexts,
    )

    if p.scripts is not None:
        p.scripts.postprocess(p, res)

    return res


def old_hires_fix_first_pass_dimensions(width, height):
    """old algorithm for auto-calculating first pass size"""

    desired_pixel_count = 512 * 512
    actual_pixel_count = width * height
    scale = math.sqrt(desired_pixel_count / actual_pixel_count)
    width = math.ceil(scale * width / 64) * 64
    height = math.ceil(scale * height / 64) * 64

    return width, height
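
# Worked example: for a 1024x1024 target, scale = sqrt(512*512 / (1024*1024)) = 0.5,
# so the first pass renders at ceil(0.5 * 1024 / 64) * 64 = 512 in each dimension;
# rounding up to a multiple of 64 keeps the size valid for the model.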


class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
    sampler = None
    cached_hr_uc = [None, None]
    cached_hr_c = [None, None]

    def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs):
        super().__init__(**kwargs)
        self.enable_hr = enable_hr
        self.denoising_strength = denoising_strength
        self.hr_scale = hr_scale
        self.hr_upscaler = hr_upscaler
        self.hr_second_pass_steps = hr_second_pass_steps
        self.hr_resize_x = hr_resize_x
        self.hr_resize_y = hr_resize_y
        self.hr_upscale_to_x = hr_resize_x
        self.hr_upscale_to_y = hr_resize_y
        self.hr_sampler_name = hr_sampler_name
        self.hr_prompt = hr_prompt
        self.hr_negative_prompt = hr_negative_prompt
        self.all_hr_prompts = None
        self.all_hr_negative_prompts = None

        if firstphase_width != 0 or firstphase_height != 0:
            self.hr_upscale_to_x = self.width
            self.hr_upscale_to_y = self.height
            self.width = firstphase_width
            self.height = firstphase_height

        self.truncate_x = 0
        self.truncate_y = 0
        self.applied_old_hires_behavior_to = None

        self.hr_prompts = None
        self.hr_negative_prompts = None
        self.hr_extra_network_data = None

        self.cached_hr_uc = StableDiffusionProcessingTxt2Img.cached_hr_uc
        self.cached_hr_c = StableDiffusionProcessingTxt2Img.cached_hr_c
        self.hr_c = None
        self.hr_uc = None
    def init(self, all_prompts, all_seeds, all_subseeds):
        if self.enable_hr:
            if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
                self.extra_generation_params["Hires sampler"] = self.hr_sampler_name

            if tuple(self.hr_prompt) != tuple(self.prompt):
                self.extra_generation_params["Hires prompt"] = self.hr_prompt

            if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
                self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt

            if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
                self.hr_resize_x = self.width
                self.hr_resize_y = self.height
                self.hr_upscale_to_x = self.width
                self.hr_upscale_to_y = self.height

                self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
                self.applied_old_hires_behavior_to = (self.width, self.height)

            if self.hr_resize_x == 0 and self.hr_resize_y == 0:
                self.extra_generation_params["Hires upscale"] = self.hr_scale
                self.hr_upscale_to_x = int(self.width * self.hr_scale)
                self.hr_upscale_to_y = int(self.height * self.hr_scale)
            else:
                self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"

                if self.hr_resize_y == 0:
                    self.hr_upscale_to_x = self.hr_resize_x
                    self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                elif self.hr_resize_x == 0:
                    self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                    self.hr_upscale_to_y = self.hr_resize_y
                else:
                    target_w = self.hr_resize_x
                    target_h = self.hr_resize_y
                    src_ratio = self.width / self.height
                    dst_ratio = self.hr_resize_x / self.hr_resize_y

                    if src_ratio < dst_ratio:
                        self.hr_upscale_to_x = self.hr_resize_x
                        self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                    else:
                        self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                        self.hr_upscale_to_y = self.hr_resize_y

                    self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
                    self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f

            # special case: the user has chosen to do nothing
            if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height:
                self.enable_hr = False
                self.denoising_strength = None
                self.extra_generation_params.pop("Hires upscale", None)
                self.extra_generation_params.pop("Hires resize", None)
                return

            if not state.processing_has_refined_job_count:
                if state.job_count == -1:
                    state.job_count = self.n_iter

                shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
                state.job_count = state.job_count * 2
                state.processing_has_refined_job_count = True

            if self.hr_second_pass_steps:
                self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps

            if self.hr_upscaler is not None:
                self.extra_generation_params["Hires upscaler"] = self.hr_upscaler
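
    # Worked example for the hr_resize branch above (illustrative): a 512x768
    # first pass with hr_resize 1024x1024 gives src_ratio 0.667 < dst_ratio 1.0,
    # so the image is upscaled to 1024x1536 (filling the target width) and
    # truncate_y = (1536 - 1024) // opt_f = 64 latent rows are cropped off in
    # sample() to land exactly on the requested 1024x1024.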
    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)

        latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
        if self.enable_hr and latent_scale_mode is None:
            if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
                raise Exception(f"could not find upscaler named {self.hr_upscaler}")

        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
        samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))

        if not self.enable_hr:
            return samples

        self.is_hr_pass = True

        target_width = self.hr_upscale_to_x
        target_height = self.hr_upscale_to_y

        def save_intermediate(image, index):
            """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""

            if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
                return

            if not isinstance(image, Image.Image):
                image = sd_samplers.sample_to_image(image, index, approximation=0)

            info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
            images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")

        if latent_scale_mode is not None:
            for i in range(samples.shape[0]):
                save_intermediate(samples, i)

            samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])

            # Avoid making the inpainting conditioning unless necessary as
            # this does need some extra compute to decode / encode the image again.
            if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
                image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
            else:
                image_conditioning = self.txt2img_image_conditioning(samples)
        else:
            decoded_samples = decode_first_stage(self.sd_model, samples)
            lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)

            batch_images = []
            for i, x_sample in enumerate(lowres_samples):
                x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
                x_sample = x_sample.astype(np.uint8)
                image = Image.fromarray(x_sample)

                save_intermediate(image, i)

                image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler)
                image = np.array(image).astype(np.float32) / 255.0
                image = np.moveaxis(image, 2, 0)
                batch_images.append(image)

            decoded_samples = torch.from_numpy(np.array(batch_images))
            decoded_samples = decoded_samples.to(shared.device)

            self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
            samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method))

            image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)

        shared.state.nextjob()

        img2img_sampler_name = self.hr_sampler_name or self.sampler_name

        if self.sampler_name in ['PLMS', 'UniPC']:  # PLMS/UniPC do not support img2img so we just silently switch to DDIM
            img2img_sampler_name = 'DDIM'

        self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)

        samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]

        noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)

        # GC now before running the next img2img to prevent running out of memory
        x = None
        devices.torch_gc()

        if not self.disable_extra_networks:
            with devices.autocast():
                extra_networks.activate(self, self.hr_extra_network_data)

        with devices.autocast():
            self.calculate_hr_conds()

        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True))

        if self.scripts is not None:
            self.scripts.before_hr(self)

        samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)

        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())

        self.is_hr_pass = False

        return samples
    def close(self):
        super().close()

        self.hr_c = None
        self.hr_uc = None
        if not opts.experimental_persistent_cond_cache:
            StableDiffusionProcessingTxt2Img.cached_hr_uc = [None, None]
            StableDiffusionProcessingTxt2Img.cached_hr_c = [None, None]

    def setup_prompts(self):
        super().setup_prompts()

        if not self.enable_hr:
            return

        if self.hr_prompt == '':
            self.hr_prompt = self.prompt

        if self.hr_negative_prompt == '':
            self.hr_negative_prompt = self.negative_prompt

        if type(self.hr_prompt) == list:
            self.all_hr_prompts = self.hr_prompt
        else:
            self.all_hr_prompts = self.batch_size * self.n_iter * [self.hr_prompt]

        if type(self.hr_negative_prompt) == list:
            self.all_hr_negative_prompts = self.hr_negative_prompt
        else:
            self.all_hr_negative_prompts = self.batch_size * self.n_iter * [self.hr_negative_prompt]

        self.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_hr_prompts]
        self.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_hr_negative_prompts]

    def calculate_hr_conds(self):
        if self.hr_c is not None:
            return

        self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data)
        self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)

    def setup_conds(self):
        super().setup_conds()

        self.hr_uc = None
        self.hr_c = None

        if self.enable_hr:
            if shared.opts.hires_fix_use_firstpass_conds:
                self.calculate_hr_conds()
            elif lowvram.is_enabled(shared.sd_model):  # if in lowvram mode, we need to calculate conds right away, before the cond NN is unloaded
                with devices.autocast():
                    extra_networks.activate(self, self.hr_extra_network_data)

                self.calculate_hr_conds()

                with devices.autocast():
                    extra_networks.activate(self, self.extra_network_data)

    def parse_extra_network_prompts(self):
        res = super().parse_extra_network_prompts()

        if self.enable_hr:
            self.hr_prompts = self.all_hr_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size]
            self.hr_negative_prompts = self.all_hr_negative_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size]

            self.hr_prompts, self.hr_extra_network_data = extra_networks.parse_prompts(self.hr_prompts)

        return res
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
    sampler = None

    def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = None, mask_blur_x: int = 4, mask_blur_y: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
        super().__init__(**kwargs)

        self.init_images = init_images
        self.resize_mode: int = resize_mode
        self.denoising_strength: float = denoising_strength
        self.image_cfg_scale: float = image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
        self.init_latent = None
        self.image_mask = mask
        self.latent_mask = None
        self.mask_for_overlay = None

        if mask_blur is not None:
            mask_blur_x = mask_blur
            mask_blur_y = mask_blur

        self.mask_blur_x = mask_blur_x
        self.mask_blur_y = mask_blur_y
        self.inpainting_fill = inpainting_fill
        self.inpaint_full_res = inpaint_full_res
        self.inpaint_full_res_padding = inpaint_full_res_padding
        self.inpainting_mask_invert = inpainting_mask_invert
        self.initial_noise_multiplier = opts.initial_noise_multiplier if initial_noise_multiplier is None else initial_noise_multiplier
        self.mask = None
        self.nmask = None
        self.image_conditioning = None
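
    # prepares everything sample() needs: the (optionally inverted and blurred) mask,
    # per-image preprocessing and overlays, batching, the VAE-encoded init latent, and
    # the latent-space mask tensors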
    def init(self, all_prompts, all_seeds, all_subseeds):
        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
        crop_region = None

        image_mask = self.image_mask

        if image_mask is not None:
            image_mask = image_mask.convert('L')

            if self.inpainting_mask_invert:
                image_mask = ImageOps.invert(image_mask)
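
            # blur the mask along each axis independently; the kernel is sized to an odd
            # width covering roughly 4 sigma on each side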
            if self.mask_blur_x > 0:
                np_mask = np.array(image_mask)
                kernel_size = 2 * int(4 * self.mask_blur_x + 0.5) + 1
                np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), self.mask_blur_x)
                image_mask = Image.fromarray(np_mask)

            if self.mask_blur_y > 0:
                np_mask = np.array(image_mask)
                kernel_size = 2 * int(4 * self.mask_blur_y + 0.5) + 1
                np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y)
                image_mask = Image.fromarray(np_mask)

            if self.inpaint_full_res:
                self.mask_for_overlay = image_mask
                mask = image_mask.convert('L')
                crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
                crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
                x1, y1, x2, y2 = crop_region

                mask = mask.crop(crop_region)
                image_mask = images.resize_image(2, mask, self.width, self.height)
                self.paste_to = (x1, y1, x2-x1, y2-y1)
            else:
                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
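
                # brighten the resized mask (2x, clipped to 255), presumably so that soft
                # blurred edges still count as fully masked in the compositing overlay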
                np_mask = np.array(image_mask)
                np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
                self.mask_for_overlay = Image.fromarray(np_mask)

            self.overlay_images = []

        latent_mask = self.latent_mask if self.latent_mask is not None else image_mask

        add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
        if add_color_corrections:
            self.color_corrections = []
        imgs = []
        for img in self.init_images:

            # Save init image
            if opts.save_init_img:
                self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest()
                images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False)

            image = images.flatten(img, opts.img2img_background_color)

            if crop_region is None and self.resize_mode != 3:
                image = images.resize_image(self.resize_mode, image, self.width, self.height)

            if image_mask is not None:
                image_masked = Image.new('RGBa', (image.width, image.height))
                image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))

                self.overlay_images.append(image_masked.convert('RGBA'))

            # crop_region is not None if we are doing inpaint full res
            if crop_region is not None:
                image = image.crop(crop_region)
                image = images.resize_image(2, image, self.width, self.height)

            if image_mask is not None:
                if self.inpainting_fill != 1:
                    image = masking.fill(image, latent_mask)

            if add_color_corrections:
                self.color_corrections.append(setup_color_correction(image))

            image = np.array(image).astype(np.float32) / 255.0
            image = np.moveaxis(image, 2, 0)

            imgs.append(image)
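
        # assemble the batch: a single init image is repeated batch_size times (along with
        # its overlay and color correction); fewer images than batch_size shrinks the batch
        # to match; more images than batch_size is an error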
        if len(imgs) == 1:
            batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
            if self.overlay_images is not None:
                self.overlay_images = self.overlay_images * self.batch_size

            if self.color_corrections is not None and len(self.color_corrections) == 1:
                self.color_corrections = self.color_corrections * self.batch_size
        elif len(imgs) <= self.batch_size:
            self.batch_size = len(imgs)
            batch_images = np.array(imgs)
        else:
            raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")

        image = torch.from_numpy(batch_images)

        self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
        self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
        devices.torch_gc()

        if self.resize_mode == 3:
            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
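
        # build the latent-space mask: downscale to latent resolution, binarize, and tile
        # across the four latent channels; self.mask keeps the original latent where it is 1,
        # self.nmask marks the region to be regenerated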
        if image_mask is not None:
            init_mask = latent_mask
            latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
            latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
            latmask = latmask[0]
            latmask = np.around(latmask)
            latmask = np.tile(latmask[None], (4, 1, 1))

            self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
            self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)

            # this needs to be fixed to be done in sample() using actual seeds for batches
            if self.inpainting_fill == 2:
                self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
            elif self.inpainting_fill == 3:
                self.init_latent = self.init_latent * self.mask

        self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)
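
    # draws the initial noise (optionally rescaled by the configured noise multiplier), runs
    # the img2img sampler from the init latent, and pastes the masked-off part of the original
    # latent back in afterwards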
    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)

        if self.initial_noise_multiplier != 1.0:
            self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
            x *= self.initial_noise_multiplier

        samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)

        if self.mask is not None:
            samples = samples * self.nmask + self.init_latent * self.mask

        del x
        devices.torch_gc()

        return samples
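
    # resolution order: an explicit per-job ratio, then the global option when it was
    # overridden for this job, then the img2img-specific option, then the global option
    # as the final fallback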
    def get_token_merging_ratio(self, for_hr=False):
        return self.token_merging_ratio or ("token_merging_ratio" in self.override_settings and opts.token_merging_ratio) or opts.token_merging_ratio_img2img or opts.token_merging_ratio