import json
import math
import os
import sys
import hashlib

import torch
import numpy as np
from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
from typing import Any, Dict, List

import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.paths as paths
import modules.face_restoration
import modules.images as images
import modules.styles
import modules.sd_models as sd_models
import modules.sd_vae as sd_vae
import logging
from ldm.data.util import AddMiDaS
from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion

from einops import repeat, rearrange
from blendmodes.blend import blendLayers, BlendType

# some of those options should not be changed at all because they would break the model, so I removed them from options.
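# opt_C is the number of channels in the latent space; opt_f is the VAE downsampling
# factor, so a 512x512 image corresponds to a 4x64x64 latent.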
opt_C = 4
opt_f = 8

def setup_color_correction(image):
    logging.info("Calibrating color correction.")
    correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
    return correction_target


def apply_color_correction(correction, original_image):
    logging.info("Applying color correction.")
    image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
        cv2.cvtColor(
            np.asarray(original_image),
            cv2.COLOR_RGB2LAB
        ),
        correction,
        channel_axis=2
    ), cv2.COLOR_LAB2RGB).astype("uint8"))

    image = blendLayers(image, original_image, BlendType.LUMINOSITY)

    return image

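# Composite the saved overlay (the masked parts of the original image) back over the
# generated image; paste_loc restores an inpaint-at-full-resolution crop to its place.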
def apply_overlay(image, paste_loc, index, overlays):
    if overlays is None or index >= len(overlays):
        return image

    overlay = overlays[index]

    if paste_loc is not None:
        x, y, w, h = paste_loc
        base_image = Image.new('RGBA', (overlay.width, overlay.height))
        image = images.resize_image(1, image, w, h)
        base_image.paste(image, (x, y))
        image = base_image

    image = image.convert('RGBA')
    image.alpha_composite(overlay)
    image = image.convert('RGB')

    return image


def txt2img_image_conditioning(sd_model, x, width, height):
    if sd_model.model.conditioning_key in {'hybrid', 'concat'}:  # Inpainting models
        # The "masked-image" in this case will just be all zeros since the entire image is masked.
        image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
        image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))

        # Add the fake full 1s mask to the first dimension.
        image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
        image_conditioning = image_conditioning.to(x.dtype)

        return image_conditioning

    elif sd_model.model.conditioning_key == "crossattn-adm":  # UnCLIP models
        return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)

    else:
        # Dummy zero conditioning if we're not using inpainting or unclip models.
        # Still takes up a bit of memory, but no encoder call.
        # Pretty sure we can just make this a 1x1 image since it's not going to be used besides its batch size.
        return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)

class StableDiffusionProcessing:
    """
    The first set of parameters: sd_model -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
    """

    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
        if sampler_index is not None:
            print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)

        self.outpath_samples: str = outpath_samples
        self.outpath_grids: str = outpath_grids
        self.prompt: str = prompt
        self.prompt_for_display: str = None
        self.negative_prompt: str = (negative_prompt or "")
        self.styles: list = styles or []
        self.seed: int = seed
        self.subseed: int = subseed
        self.subseed_strength: float = subseed_strength
        self.seed_resize_from_h: int = seed_resize_from_h
        self.seed_resize_from_w: int = seed_resize_from_w
        self.sampler_name: str = sampler_name
        self.batch_size: int = batch_size
        self.n_iter: int = n_iter
        self.steps: int = steps
        self.cfg_scale: float = cfg_scale
        self.width: int = width
        self.height: int = height
        self.restore_faces: bool = restore_faces
        self.tiling: bool = tiling
        self.do_not_save_samples: bool = do_not_save_samples
        self.do_not_save_grid: bool = do_not_save_grid
        self.extra_generation_params: dict = extra_generation_params or {}
        self.overlay_images = overlay_images
        self.eta = eta
        self.do_not_reload_embeddings = do_not_reload_embeddings
        self.paste_to = None
        self.color_corrections = None
        self.denoising_strength: float = denoising_strength
        self.sampler_noise_scheduler_override = None
        self.ddim_discretize = ddim_discretize or opts.ddim_discretize
        self.s_min_uncond = s_min_uncond or opts.s_min_uncond
        self.s_churn = s_churn or opts.s_churn
        self.s_tmin = s_tmin or opts.s_tmin
        self.s_tmax = s_tmax or float('inf')  # not representable as a standard ui option
        self.s_noise = s_noise or opts.s_noise
        self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
        self.override_settings_restore_afterwards = override_settings_restore_afterwards
        self.is_using_inpainting_conditioning = False
        self.disable_extra_networks = False
        self.token_merging_ratio = 0
        self.token_merging_ratio_hr = 0

        if not seed_enable_extras:
            self.subseed = -1
            self.subseed_strength = 0
            self.seed_resize_from_h = 0
            self.seed_resize_from_w = 0

        self.scripts = None
        self.script_args = script_args
        self.all_prompts = None
        self.all_negative_prompts = None
        self.all_seeds = None
        self.all_subseeds = None
        self.iteration = 0
        self.is_hr_pass = False
        self.sampler = None

    @property
    def sd_model(self):
        return shared.sd_model

    def txt2img_image_conditioning(self, x, width=None, height=None):
        self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}

        return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height)

    def depth2img_image_conditioning(self, source_image):
        # Use the AddMiDaS helper to format our source image to suit the MiDaS model
        transformer = AddMiDaS(model_type="dpt_hybrid")
        transformed = transformer({"jpg": rearrange(source_image[0], "c h w -> h w c")})
        midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
        midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)

        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
        conditioning = torch.nn.functional.interpolate(
            self.sd_model.depth_model(midas_in),
            size=conditioning_image.shape[2:],
            mode="bicubic",
            align_corners=False,
        )

        (depth_min, depth_max) = torch.aminmax(conditioning)
        conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
        return conditioning

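    # For instruct-pix2pix ("edit") models the conditioning image is encoded without
    # sampling: we take the mode of the encoder's latent distribution.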
    def edit_image_conditioning(self, source_image):
        conditioning_image = self.sd_model.encode_first_stage(source_image).mode()

        return conditioning_image

    def unclip_image_conditioning(self, source_image):
        c_adm = self.sd_model.embedder(source_image)
        if self.sd_model.noise_augmentor is not None:
            noise_level = 0  # TODO: Allow other noise levels?
            c_adm, noise_level_emb = self.sd_model.noise_augmentor(c_adm, noise_level=repeat(torch.tensor([noise_level]).to(c_adm.device), '1 -> b', b=c_adm.shape[0]))
            c_adm = torch.cat((c_adm, noise_level_emb), 1)
        return c_adm

    def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
        self.is_using_inpainting_conditioning = True

        # Handle the different mask inputs
        if image_mask is not None:
            if torch.is_tensor(image_mask):
                conditioning_mask = image_mask
            else:
                conditioning_mask = np.array(image_mask.convert("L"))
                conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
                conditioning_mask = torch.from_numpy(conditioning_mask[None, None])

                # Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
                conditioning_mask = torch.round(conditioning_mask)
        else:
            conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])

        # Create another latent image, this time with a masked version of the original input.
        # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
        conditioning_mask = conditioning_mask.to(device=source_image.device, dtype=source_image.dtype)
        conditioning_image = torch.lerp(
            source_image,
            source_image * (1.0 - conditioning_mask),
            getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
        )

        # Encode the new masked image using first stage of network.
        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))

        # Create the concatenated conditioning tensor to be fed to `c_concat`
        conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
        conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
        image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
        image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype)

        return image_conditioning

    def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
        source_image = devices.cond_cast_float(source_image)

        # HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
        # identify itself with a field common to all models. The conditioning_key is also hybrid.
        if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
            return self.depth2img_image_conditioning(source_image)

        if self.sd_model.cond_stage_key == "edit":
            return self.edit_image_conditioning(source_image)

        if self.sampler.conditioning_key in {'hybrid', 'concat'}:
            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)

        if self.sampler.conditioning_key == "crossattn-adm":
            return self.unclip_image_conditioning(source_image)

        # Dummy zero conditioning if we're not using inpainting or depth model.
        return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)

    def init(self, all_prompts, all_seeds, all_subseeds):
        pass

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        raise NotImplementedError()

    def close(self):
        self.sampler = None

    def get_token_merging_ratio(self, for_hr=False):
        if for_hr:
            return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio

        return self.token_merging_ratio or opts.token_merging_ratio


class Processed:
    def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
        self.images = images_list
        self.prompt = p.prompt
        self.negative_prompt = p.negative_prompt
        self.seed = seed
        self.subseed = subseed
        self.subseed_strength = p.subseed_strength
        self.info = info
        self.comments = comments
        self.width = p.width
        self.height = p.height
        self.sampler_name = p.sampler_name
        self.cfg_scale = p.cfg_scale
        self.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
        self.steps = p.steps
        self.batch_size = p.batch_size
        self.restore_faces = p.restore_faces
        self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
        self.sd_model_hash = shared.sd_model.sd_model_hash
        self.seed_resize_from_w = p.seed_resize_from_w
        self.seed_resize_from_h = p.seed_resize_from_h
        self.denoising_strength = getattr(p, 'denoising_strength', None)
        self.extra_generation_params = p.extra_generation_params
        self.index_of_first_image = index_of_first_image
        self.styles = p.styles
        self.job_timestamp = state.job_timestamp
        self.clip_skip = opts.CLIP_stop_at_last_layers
        self.token_merging_ratio = p.token_merging_ratio
        self.token_merging_ratio_hr = p.token_merging_ratio_hr

        self.eta = p.eta
        self.ddim_discretize = p.ddim_discretize
        self.s_churn = p.s_churn
        self.s_tmin = p.s_tmin
        self.s_tmax = p.s_tmax
        self.s_noise = p.s_noise
        self.s_min_uncond = p.s_min_uncond
        self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
        self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
        self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
        self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
        self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
        self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning

        self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
        self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
        self.all_seeds = all_seeds or p.all_seeds or [self.seed]
        self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
        self.infotexts = infotexts or [info]

    def js(self):
        obj = {
            "prompt": self.all_prompts[0],
            "all_prompts": self.all_prompts,
            "negative_prompt": self.all_negative_prompts[0],
            "all_negative_prompts": self.all_negative_prompts,
            "seed": self.seed,
            "all_seeds": self.all_seeds,
            "subseed": self.subseed,
            "all_subseeds": self.all_subseeds,
            "subseed_strength": self.subseed_strength,
            "width": self.width,
            "height": self.height,
            "sampler_name": self.sampler_name,
            "cfg_scale": self.cfg_scale,
            "steps": self.steps,
            "batch_size": self.batch_size,
            "restore_faces": self.restore_faces,
            "face_restoration_model": self.face_restoration_model,
            "sd_model_hash": self.sd_model_hash,
            "seed_resize_from_w": self.seed_resize_from_w,
            "seed_resize_from_h": self.seed_resize_from_h,
            "denoising_strength": self.denoising_strength,
            "extra_generation_params": self.extra_generation_params,
            "index_of_first_image": self.index_of_first_image,
            "infotexts": self.infotexts,
            "styles": self.styles,
            "job_timestamp": self.job_timestamp,
            "clip_skip": self.clip_skip,
            "is_using_inpainting_conditioning": self.is_using_inpainting_conditioning,
        }

        return json.dumps(obj)

    def infotext(self, p: StableDiffusionProcessing, index):
        return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)

    def get_token_merging_ratio(self, for_hr=False):
        return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio

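# Spherical linear interpolation between two noise tensors; used to blend the noise
# generated from the seed with the noise from the variation seed (subseed).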
# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
    low_norm = low/torch.norm(low, dim=1, keepdim=True)
    high_norm = high/torch.norm(high, dim=1, keepdim=True)
    dot = (low_norm*high_norm).sum(1)

    if dot.mean() > 0.9995:
        return low * val + high * (1 - val)

    omega = torch.acos(dot)
    so = torch.sin(omega)
    res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
    return res

def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
    eta_noise_seed_delta = opts.eta_noise_seed_delta or 0
    xs = []

    # if we have multiple seeds, this means we are working with batch size>1; this then
    # enables the generation of additional tensors with noise that the sampler will use during its processing.
    # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
    # produce the same images as with two batches [100], [101].
    if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0):
        sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
    else:
        sampler_noises = None

    for i, seed in enumerate(seeds):
        noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)

        subnoise = None
        if subseeds is not None:
            subseed = 0 if i >= len(subseeds) else subseeds[i]
            subnoise = devices.randn(subseed, noise_shape)

        # randn results depend on device; gpu and cpu get different results for same seed;
        # the way I see it, it's better to do this on CPU, so that everyone gets same result;
        # but the original script had it like this, so I do not dare change it for now because
        # it will break everyone's seeds.
        noise = devices.randn(seed, noise_shape)

        if subnoise is not None:
            noise = slerp(subseed_strength, noise, subnoise)

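        # Seed resize: the noise was generated at the old resolution; paste it centered
        # into a fresh full-size noise tensor so resized images keep a similar composition.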
        if noise_shape != shape:
            x = devices.randn(seed, shape)
            dx = (shape[2] - noise_shape[2]) // 2
            dy = (shape[1] - noise_shape[1]) // 2
            w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
            h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
            tx = 0 if dx < 0 else dx
            ty = 0 if dy < 0 else dy
            dx = max(-dx, 0)
            dy = max(-dy, 0)

            x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
            noise = x

        if sampler_noises is not None:
            cnt = p.sampler.number_of_needed_noises(p)

            if eta_noise_seed_delta > 0:
                torch.manual_seed(seed + eta_noise_seed_delta)

            for j in range(cnt):
                sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))

        xs.append(noise)

    if sampler_noises is not None:
        p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]

    x = torch.stack(xs).to(shared.device)
    return x

def decode_first_stage(model, x):
    with devices.autocast(disable=x.dtype == devices.dtype_vae):
        x = model.decode_first_stage(x)

    return x


def get_fixed_seed(seed):
    if seed is None or seed == '' or seed == -1:
        return int(random.randrange(4294967294))

    return seed


def fix_seed(p):
    p.seed = get_fixed_seed(p.seed)
    p.subseed = get_fixed_seed(p.subseed)


def program_version():
    import launch

    res = launch.git_tag()
    if res == "<none>":
        res = None

    return res

def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0):
    index = position_in_batch + iteration * p.batch_size

    clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
    enable_hr = getattr(p, 'enable_hr', False)
    token_merging_ratio = p.get_token_merging_ratio()
    token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True)

    uses_ensd = opts.eta_noise_seed_delta != 0
    if uses_ensd:
        uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p)

    generation_params = {
        "Steps": p.steps,
        "Sampler": p.sampler_name,
        "CFG scale": p.cfg_scale,
        "Image CFG scale": getattr(p, 'image_cfg_scale', None),
        "Seed": all_seeds[index],
        "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
        "Size": f"{p.width}x{p.height}",
        "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
        "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
        "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
        "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
        "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
        "Denoising strength": getattr(p, 'denoising_strength', None),
        "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
        "Clip skip": None if clip_skip <= 1 else clip_skip,
        "ENSD": opts.eta_noise_seed_delta if uses_ensd else None,
        "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio,
        "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr,
        "Init image hash": getattr(p, 'init_img_hash', None),
        "RNG": opts.randn_source if opts.randn_source != "GPU" else None,
        "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
        **p.extra_generation_params,
        "Version": program_version() if opts.add_version_to_infotext else None,
    }

    generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])

    negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else ""

    return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()

def process_images(p: StableDiffusionProcessing) -> Processed:
    stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}

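    # remember the current values of any overridden options so they can be restored
    # in the finally block below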
    try:
        # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint
        if sd_models.checkpoint_alisases.get(p.override_settings.get('sd_model_checkpoint')) is None:
            p.override_settings.pop('sd_model_checkpoint', None)
            sd_models.reload_model_weights()

        for k, v in p.override_settings.items():
            setattr(opts, k, v)

            if k == 'sd_model_checkpoint':
                sd_models.reload_model_weights()

            if k == 'sd_vae':
                sd_vae.reload_vae_weights()

        sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())

        res = process_images_inner(p)

    finally:
        sd_models.apply_token_merging(p.sd_model, 0)

        # restore opts to original state
        if p.override_settings_restore_afterwards:
            for k, v in stored_opts.items():
                setattr(opts, k, v)

                if k == 'sd_vae':
                    sd_vae.reload_vae_weights()

    return res

def process_images_inner(p: StableDiffusionProcessing) -> Processed:
    """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""

    if type(p.prompt) == list:
        assert(len(p.prompt) > 0)
    else:
        assert p.prompt is not None

    devices.torch_gc()

    seed = get_fixed_seed(p.seed)
    subseed = get_fixed_seed(p.subseed)

    modules.sd_hijack.model_hijack.apply_circular(p.tiling)
    modules.sd_hijack.model_hijack.clear_comments()

    comments = {}

    if type(p.prompt) == list:
        p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.prompt]
    else:
        p.all_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_styles_to_prompt(p.prompt, p.styles)]

    if type(p.negative_prompt) == list:
        p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in p.negative_prompt]
    else:
        p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)]

    if type(seed) == list:
        p.all_seeds = seed
    else:
        p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]

    if type(subseed) == list:
        p.all_subseeds = subseed
    else:
        p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]

    def infotext(iteration=0, position_in_batch=0):
        return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch)

    if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
        model_hijack.embedding_db.load_textual_inversion_embeddings()

    if p.scripts is not None:
        p.scripts.process(p)

    infotexts = []
    output_images = []

    cached_uc = [None, None]
    cached_c = [None, None]

    def get_conds_with_caching(function, required_prompts, steps, cache):
        """
        Returns the result of calling function(shared.sd_model, required_prompts, steps)
        using a cache to store the result if the same arguments have been used before.

        cache is an array containing two elements. The first element is a tuple
        representing the previously used arguments, or None if no arguments
        have been used before. The second element is where the previously
        computed result is stored.
        """

        if cache[0] is not None and (required_prompts, steps) == cache[0]:
            return cache[1]

        with devices.autocast():
            cache[1] = function(shared.sd_model, required_prompts, steps)

        cache[0] = (required_prompts, steps)
        return cache[1]

    with torch.no_grad(), p.sd_model.ema_scope():
        with devices.autocast():
            p.init(p.all_prompts, p.all_seeds, p.all_subseeds)

            # for OSX, loading the model during sampling changes the generated picture, so it is loaded here
            if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
                sd_vae_approx.model()

        if state.job_count == -1:
            state.job_count = p.n_iter

        extra_network_data = None
        for n in range(p.n_iter):
            p.iteration = n

            if state.skipped:
                state.skipped = False

            if state.interrupted:
                break

            prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
            negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
            seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
            subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]

            if p.scripts is not None:
                p.scripts.before_process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)

            if len(prompts) == 0:
                break

            prompts, extra_network_data = extra_networks.parse_prompts(prompts)

            if not p.disable_extra_networks:
                with devices.autocast():
                    extra_networks.activate(p, extra_network_data)

            if p.scripts is not None:
                p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)

            # params.txt should be saved after scripts.process_batch, since the
            # infotext could be modified by that callback
            # Example: a wildcard processed by process_batch sets an extra model
            # strength, which is saved as "Model Strength: 1.0" in the infotext
            if n == 0:
                with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
                    processed = Processed(p, [], p.seed, "")
                    file.write(processed.infotext(p, 0))

            sampler_config = sd_samplers.find_sampler_config(p.sampler_name)
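            # second-order samplers run the model twice per step, so prompt-editing
            # schedules need conditioning computed over twice the number of steps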
            step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1
            uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc)
            c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c)

            if len(model_hijack.comments) > 0:
                for comment in model_hijack.comments:
                    comments[comment] = 1

            if p.n_iter > 1:
                shared.state.job = f"Batch {n+1} out of {p.n_iter}"

            with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
                samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)

            x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))]
            for x in x_samples_ddim:
                devices.test_for_nans(x, "vae")

            x_samples_ddim = torch.stack(x_samples_ddim).float()
            x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

            del samples_ddim

            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
                lowvram.send_everything_to_cpu()

            devices.torch_gc()

            if p.scripts is not None:
                p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

            for i, x_sample in enumerate(x_samples_ddim):
                p.batch_index = i

                x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
                x_sample = x_sample.astype(np.uint8)

                if p.restore_faces:
                    if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")

                    devices.torch_gc()

                    x_sample = modules.face_restoration.restore_faces(x_sample)
                    devices.torch_gc()

                image = Image.fromarray(x_sample)

                if p.scripts is not None:
                    pp = scripts.PostprocessImageArgs(image)
                    p.scripts.postprocess_image(p, pp)
                    image = pp.image

                if p.color_corrections is not None and i < len(p.color_corrections):
                    if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
                        image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
                        images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
                    image = apply_color_correction(p.color_corrections[i], image)

                image = apply_overlay(image, p.paste_to, i, p.overlay_images)

                if opts.samples_save and not p.do_not_save_samples:
                    images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)

                text = infotext(n, i)
                infotexts.append(text)
                if opts.enable_pnginfo:
                    image.info["parameters"] = text
                output_images.append(image)

                if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]):
                    image_mask = p.mask_for_overlay.convert('RGB')
                    image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')

                    if opts.save_mask:
                        images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")

                    if opts.save_mask_composite:
                        images.save_image(image_mask_composite, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")

                    if opts.return_mask:
                        output_images.append(image_mask)

                    if opts.return_mask_composite:
                        output_images.append(image_mask_composite)

            del x_samples_ddim

            devices.torch_gc()

            state.nextjob()

        p.color_corrections = None

        index_of_first_image = 0
        unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
        if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
            grid = images.image_grid(output_images, p.batch_size)

            if opts.return_grid:
                text = infotext()
                infotexts.insert(0, text)
                if opts.enable_pnginfo:
                    grid.info["parameters"] = text
                output_images.insert(0, grid)
                index_of_first_image = 1

            if opts.grid_save:
                images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)

    if not p.disable_extra_networks and extra_network_data:
        extra_networks.deactivate(p, extra_network_data)

    devices.torch_gc()

    res = Processed(
        p,
        images_list=output_images,
        seed=p.all_seeds[0],
        info=infotext(),
        comments="".join(f"\n\n{comment}" for comment in comments),
        subseed=p.all_subseeds[0],
        index_of_first_image=index_of_first_image,
        infotexts=infotexts,
    )

    if p.scripts is not None:
        p.scripts.postprocess(p, res)

    return res

def old_hires_fix_first_pass_dimensions(width, height):
    """old algorithm for auto-calculating first pass size"""

    desired_pixel_count = 512 * 512
    actual_pixel_count = width * height
    scale = math.sqrt(desired_pixel_count / actual_pixel_count)
    width = math.ceil(scale * width / 64) * 64
    height = math.ceil(scale * height / 64) * 64

    return width, height

class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
    sampler = None

    def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, **kwargs):
        super().__init__(**kwargs)
        self.enable_hr = enable_hr
        self.denoising_strength = denoising_strength
        self.hr_scale = hr_scale
        self.hr_upscaler = hr_upscaler
        self.hr_second_pass_steps = hr_second_pass_steps
        self.hr_resize_x = hr_resize_x
        self.hr_resize_y = hr_resize_y
        self.hr_upscale_to_x = hr_resize_x
        self.hr_upscale_to_y = hr_resize_y

        if firstphase_width != 0 or firstphase_height != 0:
            self.hr_upscale_to_x = self.width
            self.hr_upscale_to_y = self.height
            self.width = firstphase_width
            self.height = firstphase_height

        self.truncate_x = 0
        self.truncate_y = 0
        self.applied_old_hires_behavior_to = None

    def init(self, all_prompts, all_seeds, all_subseeds):
        if self.enable_hr:
            if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
                self.hr_resize_x = self.width
                self.hr_resize_y = self.height
                self.hr_upscale_to_x = self.width
                self.hr_upscale_to_y = self.height

                self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
                self.applied_old_hires_behavior_to = (self.width, self.height)

            if self.hr_resize_x == 0 and self.hr_resize_y == 0:
                self.extra_generation_params["Hires upscale"] = self.hr_scale
                self.hr_upscale_to_x = int(self.width * self.hr_scale)
                self.hr_upscale_to_y = int(self.height * self.hr_scale)
            else:
                self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"

                if self.hr_resize_y == 0:
                    self.hr_upscale_to_x = self.hr_resize_x
                    self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                elif self.hr_resize_x == 0:
                    self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                    self.hr_upscale_to_y = self.hr_resize_y
                else:
                    target_w = self.hr_resize_x
                    target_h = self.hr_resize_y
                    src_ratio = self.width / self.height
                    dst_ratio = self.hr_resize_x / self.hr_resize_y

                    if src_ratio < dst_ratio:
                        self.hr_upscale_to_x = self.hr_resize_x
                        self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                    else:
                        self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                        self.hr_upscale_to_y = self.hr_resize_y

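                    # the upscaled image keeps the source aspect ratio, so it may overshoot
                    # the requested size; record the excess (in latent units) to crop later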
                    self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
                    self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f

            # special case: the user has chosen to do nothing
            if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height:
                self.enable_hr = False
                self.denoising_strength = None
                self.extra_generation_params.pop("Hires upscale", None)
                self.extra_generation_params.pop("Hires resize", None)
                return

            if not state.processing_has_refined_job_count:
                if state.job_count == -1:
                    state.job_count = self.n_iter

                shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
                state.job_count = state.job_count * 2
                state.processing_has_refined_job_count = True

            if self.hr_second_pass_steps:
                self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps

            if self.hr_upscaler is not None:
                self.extra_generation_params["Hires upscaler"] = self.hr_upscaler

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)

        latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
        if self.enable_hr and latent_scale_mode is None:
            assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}"

        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
        samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))

        if not self.enable_hr:
            return samples

        self.is_hr_pass = True

        target_width = self.hr_upscale_to_x
        target_height = self.hr_upscale_to_y

        def save_intermediate(image, index):
            """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""

            if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
                return

            if not isinstance(image, Image.Image):
                image = sd_samplers.sample_to_image(image, index, approximation=0)

            info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
            images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix")

        if latent_scale_mode is not None:
            for i in range(samples.shape[0]):
                save_intermediate(samples, i)

            samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])

            # Avoid making the inpainting conditioning unless necessary as
            # this does need some extra compute to decode / encode the image again.
            if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
                image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
            else:
                image_conditioning = self.txt2img_image_conditioning(samples)
        else:
            decoded_samples = decode_first_stage(self.sd_model, samples)
            lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)

            batch_images = []
            for i, x_sample in enumerate(lowres_samples):
                x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
                x_sample = x_sample.astype(np.uint8)
                image = Image.fromarray(x_sample)

                save_intermediate(image, i)

                image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler)
                image = np.array(image).astype(np.float32) / 255.0
                image = np.moveaxis(image, 2, 0)
                batch_images.append(image)

            decoded_samples = torch.from_numpy(np.array(batch_images))
            decoded_samples = decoded_samples.to(shared.device)
            decoded_samples = 2. * decoded_samples - 1.

            samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))

            image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)

        shared.state.nextjob()

        img2img_sampler_name = self.sampler_name

        if self.sampler_name in ['PLMS', 'UniPC']:  # PLMS/UniPC do not support img2img so we just silently switch to DDIM
            img2img_sampler_name = 'DDIM'

        self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)

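        # crop the upscaled latent to the exact target size; truncate_x/truncate_y were
        # computed in init() and are already in latent units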
        samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]

        noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)

        # GC now before running the next img2img to prevent running out of memory
        x = None
        devices.torch_gc()

        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True))

        samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)

        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())

        self.is_hr_pass = False

        return samples

class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
    sampler = None

    def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
        super().__init__(**kwargs)

        self.init_images = init_images
        self.resize_mode: int = resize_mode
        self.denoising_strength: float = denoising_strength
        self.image_cfg_scale: float = image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
        self.init_latent = None
        self.image_mask = mask
        self.latent_mask = None
        self.mask_for_overlay = None
        self.mask_blur = mask_blur
        self.inpainting_fill = inpainting_fill
        self.inpaint_full_res = inpaint_full_res
        self.inpaint_full_res_padding = inpaint_full_res_padding
        self.inpainting_mask_invert = inpainting_mask_invert
        self.initial_noise_multiplier = opts.initial_noise_multiplier if initial_noise_multiplier is None else initial_noise_multiplier
        self.mask = None
        self.nmask = None
        self.image_conditioning = None

    def init(self, all_prompts, all_seeds, all_subseeds):
        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
        crop_region = None

        image_mask = self.image_mask

        if image_mask is not None:
            image_mask = image_mask.convert('L')

            if self.inpainting_mask_invert:
                image_mask = ImageOps.invert(image_mask)

            if self.mask_blur > 0:
                image_mask = image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))

            if self.inpaint_full_res:
                self.mask_for_overlay = image_mask
                mask = image_mask.convert('L')
                crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
                crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
                x1, y1, x2, y2 = crop_region

                mask = mask.crop(crop_region)
                image_mask = images.resize_image(2, mask, self.width, self.height)
                self.paste_to = (x1, y1, x2-x1, y2-y1)
            else:
                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
                np_mask = np.array(image_mask)
                np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
                self.mask_for_overlay = Image.fromarray(np_mask)

            self.overlay_images = []

        latent_mask = self.latent_mask if self.latent_mask is not None else image_mask

        add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
        if add_color_corrections:
            self.color_corrections = []
        imgs = []
        for img in self.init_images:

            # Save init image
            if opts.save_init_img:
                self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest()
                images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False)

            image = images.flatten(img, opts.img2img_background_color)

            if crop_region is None and self.resize_mode != 3:
                image = images.resize_image(self.resize_mode, image, self.width, self.height)

            if image_mask is not None:
                image_masked = Image.new('RGBa', (image.width, image.height))
                image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))

                self.overlay_images.append(image_masked.convert('RGBA'))

            # crop_region is not None if we are doing inpaint full res
            if crop_region is not None:
                image = image.crop(crop_region)
                image = images.resize_image(2, image, self.width, self.height)

            if image_mask is not None:
                if self.inpainting_fill != 1:
                    image = masking.fill(image, latent_mask)

            if add_color_corrections:
                self.color_corrections.append(setup_color_correction(image))

            image = np.array(image).astype(np.float32) / 255.0
            image = np.moveaxis(image, 2, 0)

            imgs.append(image)

        if len(imgs) == 1:
            batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
            if self.overlay_images is not None:
                self.overlay_images = self.overlay_images * self.batch_size

            if self.color_corrections is not None and len(self.color_corrections) == 1:
                self.color_corrections = self.color_corrections * self.batch_size
        elif len(imgs) <= self.batch_size:
            self.batch_size = len(imgs)
            batch_images = np.array(imgs)
        else:
            raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")

        image = torch.from_numpy(batch_images)
        image = 2. * image - 1.
        image = image.to(shared.device)

        self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))

        if self.resize_mode == 3:
            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")

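        # build a binary mask at latent resolution: self.mask keeps the original latents
        # where the image is preserved, self.nmask marks the region to be regenerated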
        if image_mask is not None:
            init_mask = latent_mask
            latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
            latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
            latmask = latmask[0]
            latmask = np.around(latmask)
            latmask = np.tile(latmask[None], (4, 1, 1))

            self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
            self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)

            # this needs to be fixed to be done in sample() using actual seeds for batches
            if self.inpainting_fill == 2:
                self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
            elif self.inpainting_fill == 3:
                self.init_latent = self.init_latent * self.mask

        self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)

        if self.initial_noise_multiplier != 1.0:
            self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
            x *= self.initial_noise_multiplier

        samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)

        if self.mask is not None:
            samples = samples * self.nmask + self.init_latent * self.mask

        del x
        devices.torch_gc()

        return samples

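    # precedence: an explicit per-job ratio, then an explicitly overridden global ratio,
    # then the img2img-specific option, and finally the global option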
    def get_token_merging_ratio(self, for_hr=False):
        return self.token_merging_ratio or ("token_merging_ratio" in self.override_settings and opts.token_merging_ratio) or opts.token_merging_ratio_img2img or opts.token_merging_ratio