# processing.py

import json
import math
import os
import sys
import warnings

import torch
import numpy as np
from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
from typing import Any, Dict, List, Optional

import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.paths as paths
import modules.face_restoration
import modules.images as images
import modules.styles
import modules.sd_models as sd_models
import modules.sd_vae as sd_vae
import logging
from ldm.data.util import AddMiDaS
from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion

from einops import repeat, rearrange
from blendmodes.blend import blendLayers, BlendType

# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
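# opt_C is the number of latent channels and opt_f the VAE downsampling factor:
# e.g. a 512x512 RGB image corresponds to a 4x64x64 latent (512 / 8 = 64).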


def setup_color_correction(image):
    logging.info("Calibrating color correction.")
    correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
    return correction_target


def apply_color_correction(correction, original_image):
    logging.info("Applying color correction.")
    image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
        cv2.cvtColor(
            np.asarray(original_image),
            cv2.COLOR_RGB2LAB
        ),
        correction,
        channel_axis=2
    ), cv2.COLOR_LAB2RGB).astype("uint8"))

    image = blendLayers(image, original_image, BlendType.LUMINOSITY)

    return image
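

# Usage sketch (variable names are illustrative): calibrate against the init
# image, then re-apply after generation so img2img output keeps the original
# palette:
#   correction = setup_color_correction(init_image)
#   result = apply_color_correction(correction, generated_image)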


def apply_overlay(image, paste_loc, index, overlays):
    if overlays is None or index >= len(overlays):
        return image

    overlay = overlays[index]

    if paste_loc is not None:
        x, y, w, h = paste_loc
        base_image = Image.new('RGBA', (overlay.width, overlay.height))
        image = images.resize_image(1, image, w, h)
        base_image.paste(image, (x, y))
        image = base_image

    image = image.convert('RGBA')
    image.alpha_composite(overlay)
    image = image.convert('RGB')

    return image


def txt2img_image_conditioning(sd_model, x, width, height):
    if sd_model.model.conditioning_key not in {'hybrid', 'concat'}:
        # Dummy zero conditioning if we're not using the inpainting model.
        # Still takes up a bit of memory, but no encoder call.
        # Pretty sure we can just make this a 1x1 image since it's not going to be used besides its batch size.
        return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)

    # The "masked-image" in this case will just be all zeros since the entire image is masked.
    image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
    image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))

    # Add the fake full 1s mask to the first dimension.
    image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
    image_conditioning = image_conditioning.to(x.dtype)

    return image_conditioning
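
# The 5 channels above are 1 mask channel plus 4 latent channels: the pad call
# prepends the full-ones mask to the encoded (all-zero) masked image along the
# channel dimension, which is the layout the inpainting model's c_concat expects.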


class StableDiffusionProcessing:
    """
    The first set of parameters: sd_model -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
    """
    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
        if sampler_index is not None:
            print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)

        self.outpath_samples: str = outpath_samples
        self.outpath_grids: str = outpath_grids
        self.prompt: str = prompt
        self.prompt_for_display: str = None
        self.negative_prompt: str = (negative_prompt or "")
        self.styles: list = styles or []
        self.seed: int = seed
        self.subseed: int = subseed
        self.subseed_strength: float = subseed_strength
        self.seed_resize_from_h: int = seed_resize_from_h
        self.seed_resize_from_w: int = seed_resize_from_w
        self.sampler_name: str = sampler_name
        self.batch_size: int = batch_size
        self.n_iter: int = n_iter
        self.steps: int = steps
        self.cfg_scale: float = cfg_scale
        self.width: int = width
        self.height: int = height
        self.restore_faces: bool = restore_faces
        self.tiling: bool = tiling
        self.do_not_save_samples: bool = do_not_save_samples
        self.do_not_save_grid: bool = do_not_save_grid
        self.extra_generation_params: dict = extra_generation_params or {}
        self.overlay_images = overlay_images
        self.eta = eta
        self.do_not_reload_embeddings = do_not_reload_embeddings
        self.paste_to = None
        self.color_corrections = None
        self.denoising_strength: float = denoising_strength
        self.sampler_noise_scheduler_override = None
        self.ddim_discretize = ddim_discretize or opts.ddim_discretize
        self.s_churn = s_churn or opts.s_churn
        self.s_tmin = s_tmin or opts.s_tmin
        self.s_tmax = s_tmax or float('inf')  # not representable as a standard ui option
        self.s_noise = s_noise or opts.s_noise
        self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
        self.override_settings_restore_afterwards = override_settings_restore_afterwards
        self.is_using_inpainting_conditioning = False
        self.disable_extra_networks = False

        if not seed_enable_extras:
            self.subseed = -1
            self.subseed_strength = 0
            self.seed_resize_from_h = 0
            self.seed_resize_from_w = 0

        self.scripts = None
        self.script_args = script_args
        self.all_prompts = None
        self.all_negative_prompts = None
        self.all_seeds = None
        self.all_subseeds = None
        self.iteration = 0
    @property
    def sd_model(self):
        return shared.sd_model

    def txt2img_image_conditioning(self, x, width=None, height=None):
        self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}

        return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height)

    def depth2img_image_conditioning(self, source_image):
        # Use the AddMiDaS helper to format our source image to suit the MiDaS model
        transformer = AddMiDaS(model_type="dpt_hybrid")
        transformed = transformer({"jpg": rearrange(source_image[0], "c h w -> h w c")})
        midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
        midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)

        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
        conditioning = torch.nn.functional.interpolate(
            self.sd_model.depth_model(midas_in),
            size=conditioning_image.shape[2:],
            mode="bicubic",
            align_corners=False,
        )

        (depth_min, depth_max) = torch.aminmax(conditioning)
        conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
        return conditioning

    def edit_image_conditioning(self, source_image):
        conditioning_image = self.sd_model.encode_first_stage(source_image).mode()

        return conditioning_image
    def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
        self.is_using_inpainting_conditioning = True

        # Handle the different mask inputs
        if image_mask is not None:
            if torch.is_tensor(image_mask):
                conditioning_mask = image_mask
            else:
                conditioning_mask = np.array(image_mask.convert("L"))
                conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
                conditioning_mask = torch.from_numpy(conditioning_mask[None, None])

                # Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
                conditioning_mask = torch.round(conditioning_mask)
        else:
            conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])

        # Create another latent image, this time with a masked version of the original input.
        # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
        conditioning_mask = conditioning_mask.to(device=source_image.device, dtype=source_image.dtype)
        conditioning_image = torch.lerp(
            source_image,
            source_image * (1.0 - conditioning_mask),
            getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
        )

        # Encode the new masked image using first stage of network.
        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))

        # Create the concatenated conditioning tensor to be fed to `c_concat`
        conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
        conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
        image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
        image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype)

        return image_conditioning

    def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
        source_image = devices.cond_cast_float(source_image)

        # HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
        # identify itself with a field common to all models. The conditioning_key is also hybrid.
        if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
            return self.depth2img_image_conditioning(source_image)

        if self.sd_model.cond_stage_key == "edit":
            return self.edit_image_conditioning(source_image)

        if self.sampler.conditioning_key in {'hybrid', 'concat'}:
            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)

        # Dummy zero conditioning if we're not using inpainting or depth model.
        return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)

    def init(self, all_prompts, all_seeds, all_subseeds):
        pass

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        raise NotImplementedError()

    def close(self):
        self.sampler = None


class Processed:
    def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
        self.images = images_list
        self.prompt = p.prompt
        self.negative_prompt = p.negative_prompt
        self.seed = seed
        self.subseed = subseed
        self.subseed_strength = p.subseed_strength
        self.info = info
        self.comments = comments
        self.width = p.width
        self.height = p.height
        self.sampler_name = p.sampler_name
        self.cfg_scale = p.cfg_scale
        self.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
        self.steps = p.steps
        self.batch_size = p.batch_size
        self.restore_faces = p.restore_faces
        self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
        self.sd_model_hash = shared.sd_model.sd_model_hash
        self.seed_resize_from_w = p.seed_resize_from_w
        self.seed_resize_from_h = p.seed_resize_from_h
        self.denoising_strength = getattr(p, 'denoising_strength', None)
        self.extra_generation_params = p.extra_generation_params
        self.index_of_first_image = index_of_first_image
        self.styles = p.styles
        self.job_timestamp = state.job_timestamp
        self.clip_skip = opts.CLIP_stop_at_last_layers
        self.eta = p.eta
        self.ddim_discretize = p.ddim_discretize
        self.s_churn = p.s_churn
        self.s_tmin = p.s_tmin
        self.s_tmax = p.s_tmax
        self.s_noise = p.s_noise
        self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
        self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
        self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
        self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
        self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
        self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning

        self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
        self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
        self.all_seeds = all_seeds or p.all_seeds or [self.seed]
        self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
        self.infotexts = infotexts or [info]

    def js(self):
        obj = {
            "prompt": self.all_prompts[0],
            "all_prompts": self.all_prompts,
            "negative_prompt": self.all_negative_prompts[0],
            "all_negative_prompts": self.all_negative_prompts,
            "seed": self.seed,
            "all_seeds": self.all_seeds,
            "subseed": self.subseed,
            "all_subseeds": self.all_subseeds,
            "subseed_strength": self.subseed_strength,
            "width": self.width,
            "height": self.height,
            "sampler_name": self.sampler_name,
            "cfg_scale": self.cfg_scale,
            "steps": self.steps,
            "batch_size": self.batch_size,
            "restore_faces": self.restore_faces,
            "face_restoration_model": self.face_restoration_model,
            "sd_model_hash": self.sd_model_hash,
            "seed_resize_from_w": self.seed_resize_from_w,
            "seed_resize_from_h": self.seed_resize_from_h,
            "denoising_strength": self.denoising_strength,
            "extra_generation_params": self.extra_generation_params,
            "index_of_first_image": self.index_of_first_image,
            "infotexts": self.infotexts,
            "styles": self.styles,
            "job_timestamp": self.job_timestamp,
            "clip_skip": self.clip_skip,
            "is_using_inpainting_conditioning": self.is_using_inpainting_conditioning,
        }

        return json.dumps(obj)

    def infotext(self, p: StableDiffusionProcessing, index):
        return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)


# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
    low_norm = low / torch.norm(low, dim=1, keepdim=True)
    high_norm = high / torch.norm(high, dim=1, keepdim=True)
    dot = (low_norm * high_norm).sum(1)

    if dot.mean() > 0.9995:
        # vectors are nearly parallel: fall back to plain linear interpolation
        # (weights fixed so that val=0 returns low, matching the spherical branch below)
        return low * (1 - val) + high * val

    omega = torch.acos(dot)
    so = torch.sin(omega)
    res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high
    return res
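

# slerp interpolates on the hypersphere, so the result keeps Gaussian-like
# statistics; create_random_tensors below uses it to blend seed noise with
# subseed noise, e.g. slerp(0.5, noise_a, noise_b) for subseed_strength 0.5.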


def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
    eta_noise_seed_delta = opts.eta_noise_seed_delta or 0
    xs = []

    # if we have multiple seeds, this means we are working with batch size>1; this then
    # enables the generation of additional tensors with noise that the sampler will use during its processing.
    # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
    # produce the same images as with two batches [100], [101].
    if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0):
        sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
    else:
        sampler_noises = None

    for i, seed in enumerate(seeds):
        noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h // 8, seed_resize_from_w // 8)

        subnoise = None
        if subseeds is not None:
            subseed = 0 if i >= len(subseeds) else subseeds[i]
            subnoise = devices.randn(subseed, noise_shape)

        # randn results depend on device; gpu and cpu get different results for same seed;
        # the way I see it, it's better to do this on CPU, so that everyone gets same result;
        # but the original script had it like this, so I do not dare change it for now because
        # it will break everyone's seeds.
        noise = devices.randn(seed, noise_shape)

        if subnoise is not None:
            noise = slerp(subseed_strength, noise, subnoise)

        if noise_shape != shape:
            # seed resize: generate noise at the target shape and paste the centered
            # seed-resolution noise into it, so the middle of the image stays seed-stable
            x = devices.randn(seed, shape)
            dx = (shape[2] - noise_shape[2]) // 2
            dy = (shape[1] - noise_shape[1]) // 2
            w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
            h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
            tx = 0 if dx < 0 else dx
            ty = 0 if dy < 0 else dy
            dx = max(-dx, 0)
            dy = max(-dy, 0)

            x[:, ty:ty + h, tx:tx + w] = noise[:, dy:dy + h, dx:dx + w]
            noise = x

        if sampler_noises is not None:
            cnt = p.sampler.number_of_needed_noises(p)

            if eta_noise_seed_delta > 0:
                torch.manual_seed(seed + eta_noise_seed_delta)

            for j in range(cnt):
                sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))

        xs.append(noise)

    if sampler_noises is not None:
        p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]

    x = torch.stack(xs).to(shared.device)
    return x
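

# Example: create_random_tensors([opt_C, 64, 64], seeds=[100, 101]) returns a
# (2, 4, 64, 64) tensor on shared.device, each batch item deterministic in its
# own seed.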


def decode_first_stage(model, x):
    with devices.autocast(disable=x.dtype == devices.dtype_vae):
        x = model.decode_first_stage(x)

    return x


def get_fixed_seed(seed):
    if seed is None or seed == '' or seed == -1:
        return int(random.randrange(4294967294))

    return seed


def fix_seed(p):
    p.seed = get_fixed_seed(p.seed)
    p.subseed = get_fixed_seed(p.subseed)


def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0):
    index = position_in_batch + iteration * p.batch_size

    clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)

    generation_params = {
        "Steps": p.steps,
        "Sampler": p.sampler_name,
        "CFG scale": p.cfg_scale,
        "Image CFG scale": getattr(p, 'image_cfg_scale', None),
        "Seed": all_seeds[index],
        "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
        "Size": f"{p.width}x{p.height}",
        "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
        "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
        "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
        "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
        "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
        "Denoising strength": getattr(p, 'denoising_strength', None),
        "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
        "Clip skip": None if clip_skip <= 1 else clip_skip,
        "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
    }

    generation_params.update(p.extra_generation_params)

    generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])

    negative_prompt_text = "\nNegative prompt: " + p.all_negative_prompts[index] if p.all_negative_prompts[index] else ""

    return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()
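

# Illustrative infotext output (values hypothetical):
#   a photo of a cat
#   Negative prompt: blurry
#   Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 100, Size: 512x512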


def process_images(p: StableDiffusionProcessing) -> Processed:
    stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}

    try:
        for k, v in p.override_settings.items():
            setattr(opts, k, v)
            if k == 'sd_model_checkpoint':
                sd_models.reload_model_weights()
            if k == 'sd_vae':
                sd_vae.reload_vae_weights()

        res = process_images_inner(p)

    finally:
        # restore opts to original state
        if p.override_settings_restore_afterwards:
            for k, v in stored_opts.items():
                setattr(opts, k, v)
                if k == 'sd_model_checkpoint':
                    sd_models.reload_model_weights()
                if k == 'sd_vae':
                    sd_vae.reload_vae_weights()

    return res


def process_images_inner(p: StableDiffusionProcessing) -> Processed:
    """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""

    if type(p.prompt) == list:
        assert len(p.prompt) > 0
    else:
        assert p.prompt is not None

    devices.torch_gc()

    seed = get_fixed_seed(p.seed)
    subseed = get_fixed_seed(p.subseed)

    modules.sd_hijack.model_hijack.apply_circular(p.tiling)
    modules.sd_hijack.model_hijack.clear_comments()

    comments = {}

    if type(p.prompt) == list:
        p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.prompt]
    else:
        p.all_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_styles_to_prompt(p.prompt, p.styles)]

    if type(p.negative_prompt) == list:
        p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in p.negative_prompt]
    else:
        p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)]

    if type(seed) == list:
        p.all_seeds = seed
    else:
        p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]

    if type(subseed) == list:
        p.all_subseeds = subseed
    else:
        p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]

    def infotext(iteration=0, position_in_batch=0):
        return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch)

    if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
        model_hijack.embedding_db.load_textual_inversion_embeddings()

    if p.scripts is not None:
        p.scripts.process(p)

    infotexts = []
    output_images = []

    cached_uc = [None, None]
    cached_c = [None, None]
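    # each cache is a two-slot list [key, value]: key is the (prompts, steps)
    # tuple last encoded, value the resulting conditioning; reused across batches
    # so unchanged prompts are not re-encoded (see get_conds_with_caching below)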

    def get_conds_with_caching(function, required_prompts, steps, cache):
        """
        Returns the result of calling function(shared.sd_model, required_prompts, steps)
        using a cache to store the result if the same arguments have been used before.

        cache is an array containing two elements. The first element is a tuple
        representing the previously used arguments, or None if no arguments
        have been used before. The second element is where the previously
        computed result is stored.
        """

        if cache[0] is not None and (required_prompts, steps) == cache[0]:
            return cache[1]

        with devices.autocast():
            cache[1] = function(shared.sd_model, required_prompts, steps)

        cache[0] = (required_prompts, steps)
        return cache[1]

    with torch.no_grad(), p.sd_model.ema_scope():
        with devices.autocast():
            p.init(p.all_prompts, p.all_seeds, p.all_subseeds)

            # for OSX, loading the model during sampling changes the generated picture, so it is loaded here
            if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
                sd_vae_approx.model()

        if state.job_count == -1:
            state.job_count = p.n_iter

        for n in range(p.n_iter):
            p.iteration = n

            if state.skipped:
                state.skipped = False

            if state.interrupted:
                break

            prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
            negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
            seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
            subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]

            if len(prompts) == 0:
                break

            prompts, extra_network_data = extra_networks.parse_prompts(prompts)

            if not p.disable_extra_networks:
                with devices.autocast():
                    extra_networks.activate(p, extra_network_data)

            if p.scripts is not None:
                p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)

            # params.txt should be saved after scripts.process_batch, since the
            # infotext could be modified by that callback
            # Example: a wildcard processed by process_batch sets an extra model
            # strength, which is saved as "Model Strength: 1.0" in the infotext
            if n == 0:
                with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
                    processed = Processed(p, [], p.seed, "")
                    file.write(processed.infotext(p, 0))

            uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc)
            c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c)

            if len(model_hijack.comments) > 0:
                for comment in model_hijack.comments:
                    comments[comment] = 1

            if p.n_iter > 1:
                shared.state.job = f"Batch {n+1} out of {p.n_iter}"

            with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
                samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)

            x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))]
            for x in x_samples_ddim:
                devices.test_for_nans(x, "vae")

            x_samples_ddim = torch.stack(x_samples_ddim).float()
            x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

            del samples_ddim

            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
                lowvram.send_everything_to_cpu()

            devices.torch_gc()

            if p.scripts is not None:
                p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

            for i, x_sample in enumerate(x_samples_ddim):
                x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
                x_sample = x_sample.astype(np.uint8)

                if p.restore_faces:
                    if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")

                    devices.torch_gc()

                    x_sample = modules.face_restoration.restore_faces(x_sample)
                    devices.torch_gc()

                image = Image.fromarray(x_sample)

                if p.scripts is not None:
                    pp = scripts.PostprocessImageArgs(image)
                    p.scripts.postprocess_image(p, pp)
                    image = pp.image

                if p.color_corrections is not None and i < len(p.color_corrections):
                    if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
                        image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
                        images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
                    image = apply_color_correction(p.color_corrections[i], image)

                image = apply_overlay(image, p.paste_to, i, p.overlay_images)

                if opts.samples_save and not p.do_not_save_samples:
                    images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)

                text = infotext(n, i)
                infotexts.append(text)
                if opts.enable_pnginfo:
                    image.info["parameters"] = text
                output_images.append(image)

            del x_samples_ddim

            devices.torch_gc()

            state.nextjob()

        p.color_corrections = None

        index_of_first_image = 0
        unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
        if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
            grid = images.image_grid(output_images, p.batch_size)

            if opts.return_grid:
                text = infotext()
                infotexts.insert(0, text)
                if opts.enable_pnginfo:
                    grid.info["parameters"] = text
                output_images.insert(0, grid)
                index_of_first_image = 1

            if opts.grid_save:
                images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)

    if not p.disable_extra_networks:
        extra_networks.deactivate(p, extra_network_data)

    devices.torch_gc()

    res = Processed(p, output_images, p.all_seeds[0], infotext(), comments="".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts)

    if p.scripts is not None:
        p.scripts.postprocess(p, res)

    return res


def old_hires_fix_first_pass_dimensions(width, height):
    """old algorithm for auto-calculating first pass size"""

    desired_pixel_count = 512 * 512
    actual_pixel_count = width * height
    scale = math.sqrt(desired_pixel_count / actual_pixel_count)
    width = math.ceil(scale * width / 64) * 64
    height = math.ceil(scale * height / 64) * 64

    return width, height
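

# Worked example: for a 1024x768 request, scale = sqrt(512*512 / (1024*768)) ~= 0.577,
# so the first pass runs at ceil(0.577*1024/64)*64 x ceil(0.577*768/64)*64 = 640x448.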


class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
    sampler = None

    def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, **kwargs):
        super().__init__(**kwargs)
        self.enable_hr = enable_hr
        self.denoising_strength = denoising_strength
        self.hr_scale = hr_scale
        self.hr_upscaler = hr_upscaler
        self.hr_second_pass_steps = hr_second_pass_steps
        self.hr_resize_x = hr_resize_x
        self.hr_resize_y = hr_resize_y
        self.hr_upscale_to_x = hr_resize_x
        self.hr_upscale_to_y = hr_resize_y

        if firstphase_width != 0 or firstphase_height != 0:
            self.hr_upscale_to_x = self.width
            self.hr_upscale_to_y = self.height
            self.width = firstphase_width
            self.height = firstphase_height

        self.truncate_x = 0
        self.truncate_y = 0
        self.applied_old_hires_behavior_to = None

    def init(self, all_prompts, all_seeds, all_subseeds):
        if self.enable_hr:
            if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
                self.hr_resize_x = self.width
                self.hr_resize_y = self.height
                self.hr_upscale_to_x = self.width
                self.hr_upscale_to_y = self.height

                self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
                self.applied_old_hires_behavior_to = (self.width, self.height)

            if self.hr_resize_x == 0 and self.hr_resize_y == 0:
                self.extra_generation_params["Hires upscale"] = self.hr_scale
                self.hr_upscale_to_x = int(self.width * self.hr_scale)
                self.hr_upscale_to_y = int(self.height * self.hr_scale)
            else:
                self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"

                if self.hr_resize_y == 0:
                    self.hr_upscale_to_x = self.hr_resize_x
                    self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                elif self.hr_resize_x == 0:
                    self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                    self.hr_upscale_to_y = self.hr_resize_y
                else:
                    target_w = self.hr_resize_x
                    target_h = self.hr_resize_y
                    src_ratio = self.width / self.height
                    dst_ratio = self.hr_resize_x / self.hr_resize_y

                    if src_ratio < dst_ratio:
                        self.hr_upscale_to_x = self.hr_resize_x
                        self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                    else:
                        self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                        self.hr_upscale_to_y = self.hr_resize_y

                    self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
                    self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
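                    # truncate_x/y record, in latent units (1/opt_f of a pixel), how much
                    # the aspect-preserving upscale overshoots the requested size; the
                    # excess is cropped off the latent in sample() below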

            # special case: the user has chosen to do nothing
            if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height:
                self.enable_hr = False
                self.denoising_strength = None
                self.extra_generation_params.pop("Hires upscale", None)
                self.extra_generation_params.pop("Hires resize", None)
                return

            if not state.processing_has_refined_job_count:
                if state.job_count == -1:
                    state.job_count = self.n_iter

                shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
                state.job_count = state.job_count * 2
                state.processing_has_refined_job_count = True

            if self.hr_second_pass_steps:
                self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps

            if self.hr_upscaler is not None:
                self.extra_generation_params["Hires upscaler"] = self.hr_upscaler

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)

        latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
        if self.enable_hr and latent_scale_mode is None:
            assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}"

        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
        samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))

        if not self.enable_hr:
            return samples

        target_width = self.hr_upscale_to_x
        target_height = self.hr_upscale_to_y

        def save_intermediate(image, index):
            """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""

            if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
                return

            if not isinstance(image, Image.Image):
                image = sd_samplers.sample_to_image(image, index, approximation=0)

            info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
            images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix")

        if latent_scale_mode is not None:
            for i in range(samples.shape[0]):
                save_intermediate(samples, i)

            samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])

            # Avoid making the inpainting conditioning unless necessary as
            # this does need some extra compute to decode / encode the image again.
            if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
                image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
            else:
                image_conditioning = self.txt2img_image_conditioning(samples)
        else:
            decoded_samples = decode_first_stage(self.sd_model, samples)
            lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)

            batch_images = []
            for i, x_sample in enumerate(lowres_samples):
                x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
                x_sample = x_sample.astype(np.uint8)
                image = Image.fromarray(x_sample)

                save_intermediate(image, i)

                image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler)
                image = np.array(image).astype(np.float32) / 255.0
                image = np.moveaxis(image, 2, 0)
                batch_images.append(image)

            decoded_samples = torch.from_numpy(np.array(batch_images))
            decoded_samples = decoded_samples.to(shared.device)
            decoded_samples = 2. * decoded_samples - 1.

            samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))

            image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)

        shared.state.nextjob()

        img2img_sampler_name = self.sampler_name if self.sampler_name != 'PLMS' else 'DDIM'  # PLMS does not support img2img so we just silently switch to DDIM
        self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)

        samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]

        noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)

        # GC now before running the next img2img to prevent running out of memory
        x = None
        devices.torch_gc()

        samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)

        return samples
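

# Programmatic usage sketch (assumes a fully initialized webui environment with
# a loaded checkpoint; argument values are illustrative, not prescriptive):
#   p = StableDiffusionProcessingTxt2Img(prompt="a photo of a cat", steps=20,
#                                        width=512, height=512, sampler_name="Euler a")
#   res = process_images(p)
#   res.images[0].save("txt2img-example.png")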


class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
    sampler = None

    def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
        super().__init__(**kwargs)

        self.init_images = init_images
        self.resize_mode: int = resize_mode
        self.denoising_strength: float = denoising_strength
        self.image_cfg_scale: float = image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
        self.init_latent = None
        self.image_mask = mask
        self.latent_mask = None
        self.mask_for_overlay = None
        self.mask_blur = mask_blur
        self.inpainting_fill = inpainting_fill
        self.inpaint_full_res = inpaint_full_res
        self.inpaint_full_res_padding = inpaint_full_res_padding
        self.inpainting_mask_invert = inpainting_mask_invert
        self.initial_noise_multiplier = opts.initial_noise_multiplier if initial_noise_multiplier is None else initial_noise_multiplier
        self.mask = None
        self.nmask = None
        self.image_conditioning = None

    def init(self, all_prompts, all_seeds, all_subseeds):
        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
        crop_region = None

        image_mask = self.image_mask

        if image_mask is not None:
            image_mask = image_mask.convert('L')

            if self.inpainting_mask_invert:
                image_mask = ImageOps.invert(image_mask)

            if self.mask_blur > 0:
                image_mask = image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))

            if self.inpaint_full_res:
                self.mask_for_overlay = image_mask
                mask = image_mask.convert('L')
                crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
                crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
                x1, y1, x2, y2 = crop_region

                mask = mask.crop(crop_region)
                image_mask = images.resize_image(2, mask, self.width, self.height)
                self.paste_to = (x1, y1, x2-x1, y2-y1)
            else:
                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
                np_mask = np.array(image_mask)
                np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
                self.mask_for_overlay = Image.fromarray(np_mask)

            self.overlay_images = []

        latent_mask = self.latent_mask if self.latent_mask is not None else image_mask

        add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
        if add_color_corrections:
            self.color_corrections = []
        imgs = []
        for img in self.init_images:
            image = images.flatten(img, opts.img2img_background_color)

            if crop_region is None and self.resize_mode != 3:
                image = images.resize_image(self.resize_mode, image, self.width, self.height)

            if image_mask is not None:
                image_masked = Image.new('RGBa', (image.width, image.height))
                image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))

                self.overlay_images.append(image_masked.convert('RGBA'))

            # crop_region is not None if we are doing inpaint full res
            if crop_region is not None:
                image = image.crop(crop_region)
                image = images.resize_image(2, image, self.width, self.height)

            if image_mask is not None:
                if self.inpainting_fill != 1:
                    image = masking.fill(image, latent_mask)

            if add_color_corrections:
                self.color_corrections.append(setup_color_correction(image))

            image = np.array(image).astype(np.float32) / 255.0
            image = np.moveaxis(image, 2, 0)

            imgs.append(image)

        if len(imgs) == 1:
            batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
            if self.overlay_images is not None:
                self.overlay_images = self.overlay_images * self.batch_size

            if self.color_corrections is not None and len(self.color_corrections) == 1:
                self.color_corrections = self.color_corrections * self.batch_size

        elif len(imgs) <= self.batch_size:
            self.batch_size = len(imgs)
            batch_images = np.array(imgs)
        else:
            raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")

        image = torch.from_numpy(batch_images)
        image = 2. * image - 1.
        image = image.to(shared.device)

        self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))

        if self.resize_mode == 3:
            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")

        if image_mask is not None:
            init_mask = latent_mask
            latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
            latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
            latmask = latmask[0]
            latmask = np.around(latmask)
            latmask = np.tile(latmask[None], (4, 1, 1))

            self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
            self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)

            # this needs to be fixed to be done in sample() using actual seeds for batches
            if self.inpainting_fill == 2:
                self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
            elif self.inpainting_fill == 3:
                self.init_latent = self.init_latent * self.mask

        self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)

        if self.initial_noise_multiplier != 1.0:
            self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
            x *= self.initial_noise_multiplier

        samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)

        if self.mask is not None:
            # blend: keep the original latent where mask==1, denoised result where nmask==1
            samples = samples * self.nmask + self.init_latent * self.mask

        del x
        devices.torch_gc()

        return samples