sd3_impls.py

### Impls of the SD3 core diffusion model and VAE

import torch
import math
import einops
from modules.models.sd3.mmdit import MMDiT
from PIL import Image


#################################################################################################
### MMDiT Model Wrapping
#################################################################################################


class ModelSamplingDiscreteFlow(torch.nn.Module):
    """Helper for sampler scheduling (i.e. timestep/sigma calculations) for Discrete Flow models"""
    def __init__(self, shift=1.0):
        super().__init__()
        self.shift = shift
        timesteps = 1000
        ts = self.sigma(torch.arange(1, timesteps + 1, 1))
        self.register_buffer('sigmas', ts)

    @property
    def sigma_min(self):
        return self.sigmas[0]

    @property
    def sigma_max(self):
        return self.sigmas[-1]

    def timestep(self, sigma):
        return sigma * 1000

    def sigma(self, timestep: torch.Tensor):
        timestep = timestep / 1000.0
        if self.shift == 1.0:
            return timestep
        return self.shift * timestep / (1 + (self.shift - 1) * timestep)

    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        return model_input - model_output * sigma

    def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
        return sigma * noise + (1.0 - sigma) * latent_image
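
# Example of the shift mapping above (illustrative): with shift=3.0 and timestep=500,
# sigma = 3.0 * 0.5 / (1 + 2.0 * 0.5) = 0.75, so shift values above 1.0 push more of the
# schedule toward higher noise levels while still keeping sigma(1000) = 1.0.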


class BaseModel(torch.nn.Module):
    """Wrapper around the core MM-DiT model"""
    def __init__(self, shift=1.0, device=None, dtype=torch.float32, state_dict=None, prefix=""):
        super().__init__()
        # Important configuration values can be quickly determined by checking shapes in the source file
        # Some of these will vary between models (e.g. 2B vs 8B primarily differ in their depth, but also other details change)
        patch_size = state_dict[f"{prefix}x_embedder.proj.weight"].shape[2]
        depth = state_dict[f"{prefix}x_embedder.proj.weight"].shape[0] // 64
        num_patches = state_dict[f"{prefix}pos_embed"].shape[1]
        pos_embed_max_size = round(math.sqrt(num_patches))
        adm_in_channels = state_dict[f"{prefix}y_embedder.mlp.0.weight"].shape[1]
        context_shape = state_dict[f"{prefix}context_embedder.weight"].shape
        context_embedder_config = {
            "target": "torch.nn.Linear",
            "params": {
                "in_features": context_shape[1],
                "out_features": context_shape[0],
            },
        }
        self.diffusion_model = MMDiT(input_size=None, pos_embed_scaling_factor=None, pos_embed_offset=None, pos_embed_max_size=pos_embed_max_size, patch_size=patch_size, in_channels=16, depth=depth, num_patches=num_patches, adm_in_channels=adm_in_channels, context_embedder_config=context_embedder_config, device=device, dtype=dtype)
        self.model_sampling = ModelSamplingDiscreteFlow(shift=shift)
        self.depth = depth

    def apply_model(self, x, sigma, c_crossattn=None, y=None):
        dtype = self.get_dtype()
        timestep = self.model_sampling.timestep(sigma).float()
        model_output = self.diffusion_model(x.to(dtype), timestep, context=c_crossattn.to(dtype), y=y.to(dtype)).float()
        return self.model_sampling.calculate_denoised(sigma, model_output, x)

    def forward(self, *args, **kwargs):
        return self.apply_model(*args, **kwargs)

    def get_dtype(self):
        return self.diffusion_model.dtype
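
# Construction sketch (illustrative; the checkpoint path and key prefix below are assumptions
# and may differ between files):
#
#   sd = safetensors.torch.load_file("models/sd3_medium.safetensors")
#   model = BaseModel(shift=3.0, dtype=torch.float16, state_dict=sd, prefix="model.diffusion_model.")
#
# The shape checks in __init__ then infer patch_size, depth (hidden size // 64) and the
# context embedder dimensions directly from the weights, so differently sized variants load
# through the same code path.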


class CFGDenoiser(torch.nn.Module):
    """Helper for applying CFG Scaling to diffusion outputs"""
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, x, timestep, cond, uncond, cond_scale):
        # Run cond and uncond in a batch together
        batched = self.model.apply_model(torch.cat([x, x]), torch.cat([timestep, timestep]), c_crossattn=torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]), y=torch.cat([cond["y"], uncond["y"]]))
        # Then split and apply CFG Scaling
        pos_out, neg_out = batched.chunk(2)
        scaled = neg_out + (pos_out - neg_out) * cond_scale
        return scaled
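
# The forward pass above is standard classifier-free guidance: one batched model call produces
# the conditional and unconditional predictions, and the output is uncond + (cond - uncond) * scale.
# `cond` and `uncond` are expected to be dicts holding "c_crossattn" (sequence of text embeddings)
# and "y" (pooled conditioning vector) tensors.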


class SD3LatentFormat:
    """Latents are slightly shifted from center - this class shifts/scales latents when moving between VAE space and the diffusion model's latent space (process_in after VAE Encode, process_out before VAE Decode)"""
    def __init__(self):
        self.scale_factor = 1.5305
        self.shift_factor = 0.0609

    def process_in(self, latent):
        return (latent - self.shift_factor) * self.scale_factor

    def process_out(self, latent):
        return (latent / self.scale_factor) + self.shift_factor

    def decode_latent_to_preview(self, x0):
        """Quick RGB approximate preview of sd3 latents"""
        factors = torch.tensor([
            [-0.0645,  0.0177,  0.1052], [ 0.0028,  0.0312,  0.0650],
            [ 0.1848,  0.0762,  0.0360], [ 0.0944,  0.0360,  0.0889],
            [ 0.0897,  0.0506, -0.0364], [-0.0020,  0.1203,  0.0284],
            [ 0.0855,  0.0118,  0.0283], [-0.0539,  0.0658,  0.1047],
            [-0.0057,  0.0116,  0.0700], [-0.0412,  0.0281, -0.0039],
            [ 0.1106,  0.1171,  0.1220], [-0.0248,  0.0682, -0.0481],
            [ 0.0815,  0.0846,  0.1207], [-0.0120, -0.0055, -0.0867],
            [-0.0749, -0.0634, -0.0456], [-0.1418, -0.1457, -0.1259],
        ], device="cpu")
        latent_image = x0[0].permute(1, 2, 0).cpu() @ factors
        latents_ubyte = (((latent_image + 1) / 2)
                         .clamp(0, 1)  # change scale from -1..1 to 0..1
                         .mul(0xFF)  # to 0..255
                         .byte()).cpu()
        return Image.fromarray(latents_ubyte.numpy())
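
# Usage sketch (illustrative; `vae` and `image` are placeholders): process_in maps freshly
# encoded VAE latents into the distribution the diffusion model expects, and process_out maps
# sampled latents back before decoding:
#
#   fmt = SD3LatentFormat()
#   z = fmt.process_in(vae.encode(image))      # after VAE Encode
#   # ... run sampling on z ...
#   decoded = vae.decode(fmt.process_out(z))   # before VAE Decode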


#################################################################################################
### K-Diffusion Sampling
#################################################################################################


def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
    dims_to_append = target_dims - x.ndim
    return x[(...,) + (None,) * dims_to_append]


def to_d(x, sigma, denoised):
    """Converts a denoiser output to a Karras ODE derivative."""
    return (x - denoised) / append_dims(sigma, x.ndim)


@torch.no_grad()
@torch.autocast("cuda", dtype=torch.float16)
def sample_euler(model, x, sigmas, extra_args=None):
    """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    for i in range(len(sigmas) - 1):
        sigma_hat = sigmas[i]
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x, sigma_hat, denoised)
        dt = sigmas[i + 1] - sigma_hat
        # Euler method
        x = x + d * dt
    return x
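
# Sampling sketch (illustrative; assumes `model` is a constructed BaseModel and `cond` /
# `neg_cond` are conditioning dicts as described for CFGDenoiser above):
#
#   denoiser = CFGDenoiser(model)
#   sigmas = model.model_sampling.sigmas.flip(0)          # descending: sigma_max -> sigma_min
#   sigmas = torch.cat([sigmas, sigmas.new_zeros(1)])     # end the ODE at sigma = 0
#   x = torch.randn(1, 16, 128, 128).cuda() * sigmas[0]   # 1024x1024 image -> 128x128 latent
#   x = sample_euler(denoiser, x, sigmas, extra_args={"cond": cond, "uncond": neg_cond, "cond_scale": 5.0})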


#################################################################################################
### VAE
#################################################################################################


def Normalize(in_channels, num_groups=32, dtype=torch.float32, device=None):
    return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)


class ResnetBlock(torch.nn.Module):
    def __init__(self, *, in_channels, out_channels=None, dtype=torch.float32, device=None):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.norm1 = Normalize(in_channels, dtype=dtype, device=device)
        self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
        self.norm2 = Normalize(out_channels, dtype=dtype, device=device)
        self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
        if self.in_channels != self.out_channels:
            self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)
        else:
            self.nin_shortcut = None
        self.swish = torch.nn.SiLU(inplace=True)

    def forward(self, x):
        hidden = x
        hidden = self.norm1(hidden)
        hidden = self.swish(hidden)
        hidden = self.conv1(hidden)
        hidden = self.norm2(hidden)
        hidden = self.swish(hidden)
        hidden = self.conv2(hidden)
        if self.in_channels != self.out_channels:
            x = self.nin_shortcut(x)
        return x + hidden


class AttnBlock(torch.nn.Module):
    def __init__(self, in_channels, dtype=torch.float32, device=None):
        super().__init__()
        self.norm = Normalize(in_channels, dtype=dtype, device=device)
        self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)
        self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)
        self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)
        self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)

    def forward(self, x):
        hidden = self.norm(x)
        q = self.q(hidden)
        k = self.k(hidden)
        v = self.v(hidden)
        b, c, h, w = q.shape
        q, k, v = [einops.rearrange(t, "b c h w -> b 1 (h w) c").contiguous() for t in (q, k, v)]
        hidden = torch.nn.functional.scaled_dot_product_attention(q, k, v)  # scale is dim ** -0.5 per default
        hidden = einops.rearrange(hidden, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)
        hidden = self.proj_out(hidden)
        return x + hidden


class Downsample(torch.nn.Module):
    def __init__(self, in_channels, dtype=torch.float32, device=None):
        super().__init__()
        self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0, dtype=dtype, device=device)

    def forward(self, x):
        pad = (0, 1, 0, 1)
        x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
        x = self.conv(x)
        return x


class Upsample(torch.nn.Module):
    def __init__(self, in_channels, dtype=torch.float32, device=None):
        super().__init__()
        self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        x = self.conv(x)
        return x


class VAEEncoder(torch.nn.Module):
    def __init__(self, ch=128, ch_mult=(1, 2, 4, 4), num_res_blocks=2, in_channels=3, z_channels=16, dtype=torch.float32, device=None):
        super().__init__()
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels, ch, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = torch.nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = torch.nn.ModuleList()
            attn = torch.nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for _ in range(num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dtype=dtype, device=device))
                block_in = block_out
            down = torch.nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in, dtype=dtype, device=device)
            self.down.append(down)
        # middle
        self.mid = torch.nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
        self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
        # end
        self.norm_out = Normalize(block_in, dtype=dtype, device=device)
        self.conv_out = torch.nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
        self.swish = torch.nn.SiLU(inplace=True)

    def forward(self, x):
        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1])
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))
        # middle
        h = hs[-1]
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)
        # end
        h = self.norm_out(h)
        h = self.swish(h)
        h = self.conv_out(h)
        return h


class VAEDecoder(torch.nn.Module):
    def __init__(self, ch=128, out_ch=3, ch_mult=(1, 2, 4, 4), num_res_blocks=2, resolution=256, z_channels=16, dtype=torch.float32, device=None):
        super().__init__()
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        # z to block_in
        self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
        # middle
        self.mid = torch.nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
        self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
        # upsampling
        self.up = torch.nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = torch.nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            for _ in range(self.num_res_blocks + 1):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dtype=dtype, device=device))
                block_in = block_out
            up = torch.nn.Module()
            up.block = block
            if i_level != 0:
                up.upsample = Upsample(block_in, dtype=dtype, device=device)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order
        # end
        self.norm_out = Normalize(block_in, dtype=dtype, device=device)
        self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
        self.swish = torch.nn.SiLU(inplace=True)

    def forward(self, z):
        # z to block_in
        hidden = self.conv_in(z)
        # middle
        hidden = self.mid.block_1(hidden)
        hidden = self.mid.attn_1(hidden)
        hidden = self.mid.block_2(hidden)
        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                hidden = self.up[i_level].block[i_block](hidden)
            if i_level != 0:
                hidden = self.up[i_level].upsample(hidden)
        # end
        hidden = self.norm_out(hidden)
        hidden = self.swish(hidden)
        hidden = self.conv_out(hidden)
        return hidden


class SDVAE(torch.nn.Module):
    def __init__(self, dtype=torch.float32, device=None):
        super().__init__()
        self.encoder = VAEEncoder(dtype=dtype, device=device)
        self.decoder = VAEDecoder(dtype=dtype, device=device)

    @torch.autocast("cuda", dtype=torch.float16)
    def decode(self, latent):
        return self.decoder(latent)

    @torch.autocast("cuda", dtype=torch.float16)
    def encode(self, image):
        hidden = self.encoder(image)
        mean, logvar = torch.chunk(hidden, 2, dim=1)
        logvar = torch.clamp(logvar, -30.0, 20.0)
        std = torch.exp(0.5 * logvar)
        return mean + std * torch.randn_like(mean)
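
# Decode sketch (illustrative; assumes VAE weights have been loaded into the module and `x` is
# the sampled latent from the sampling sketch above): the latent is mapped back to VAE space,
# decoded to an image tensor in roughly [-1, 1], then converted to a PIL image.
#
#   vae = SDVAE(dtype=torch.float16).cuda().eval()
#   image = vae.decode(SD3LatentFormat().process_out(x))
#   image = ((image[0].clamp(-1, 1) + 1) * 127.5).permute(1, 2, 0).byte().cpu().numpy()
#   Image.fromarray(image).save("output.png")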