sd_models.py

import collections
import os.path
import sys
import gc
import threading

import torch
import re
import safetensors.torch
from omegaconf import OmegaConf
from os import mkdir
from urllib import request

import ldm.modules.midas as midas
from ldm.util import instantiate_from_config

from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config
from modules.sd_hijack_inpainting import do_inpainting_hijack
from modules.timer import Timer
import tomesd

model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(paths.models_path, model_dir))

checkpoints_list = {}
checkpoint_aliases = {}
checkpoint_alisases = checkpoint_aliases  # backward-compatible alias for the old, misspelled name
checkpoints_loaded = collections.OrderedDict()


class CheckpointInfo:
    def __init__(self, filename):
        self.filename = filename
        abspath = os.path.abspath(filename)

        if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
            name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
        elif abspath.startswith(model_path):
            name = abspath.replace(model_path, '')
        else:
            name = os.path.basename(filename)

        if name.startswith("\\") or name.startswith("/"):
            name = name[1:]

        self.name = name
        self.name_for_extra = os.path.splitext(os.path.basename(filename))[0]
        self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
        self.hash = model_hash(filename)

        self.sha256 = hashes.sha256_from_cache(self.filename, f"checkpoint/{name}")
        self.shorthash = self.sha256[0:10] if self.sha256 else None

        self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'

        self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else [])

        self.metadata = {}

        _, ext = os.path.splitext(self.filename)
        if ext.lower() == ".safetensors":
            try:
                self.metadata = read_metadata_from_safetensors(filename)
            except Exception as e:
                errors.display(e, f"reading checkpoint metadata: {filename}")

    def register(self):
        checkpoints_list[self.title] = self
        for id in self.ids:
            checkpoint_aliases[id] = self

    def calculate_shorthash(self):
        self.sha256 = hashes.sha256(self.filename, f"checkpoint/{self.name}")
        if self.sha256 is None:
            return

        self.shorthash = self.sha256[0:10]

        if self.shorthash not in self.ids:
            self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]']

        # re-register under the new title, which now carries the shorthash
        checkpoints_list.pop(self.title)
        self.title = f'{self.name} [{self.shorthash}]'
        self.register()

        return self.shorthash
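
# A minimal sketch of what __init__ produces, assuming a hypothetical file
# models/Stable-diffusion/sub/model.safetensors with no cached sha256:
#   info = CheckpointInfo("models/Stable-diffusion/sub/model.safetensors")
#   info.name        -> "sub/model.safetensors"  (path relative to model_path)
#   info.model_name  -> "sub_model"
#   info.title       -> "sub/model.safetensors"  (no [shorthash] until sha256 is computed)
#   info.ids         -> all aliases under which register() exposes this checkpoint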


try:
    # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
    from transformers import logging, CLIPModel  # noqa: F401

    logging.set_verbosity_error()
except Exception:
    pass


def setup_model():
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    list_models()
    enable_midas_autodownload()


def checkpoint_tiles():
    def convert(name):
        return int(name) if name.isdigit() else name.lower()

    def alphanumeric_key(key):
        return [convert(c) for c in re.split('([0-9]+)', key)]

    return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key)
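
# The natural sort above orders digit runs numerically, e.g. with the nested
# alphanumeric_key and hypothetical titles:
#   sorted(["model10.ckpt", "model2.ckpt"], key=alphanumeric_key)
#   -> ["model2.ckpt", "model10.ckpt"]   (plain sorted() would put "model10" first)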


def list_models():
    checkpoints_list.clear()
    checkpoint_aliases.clear()

    cmd_ckpt = shared.cmd_opts.ckpt
    if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
        model_url = None
    else:
        model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"

    model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])

    if os.path.exists(cmd_ckpt):
        checkpoint_info = CheckpointInfo(cmd_ckpt)
        checkpoint_info.register()

        shared.opts.data['sd_model_checkpoint'] = checkpoint_info.title
    elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
        print(f"Checkpoint in --ckpt argument not found (possibly it was moved to {model_path}): {cmd_ckpt}", file=sys.stderr)

    for filename in sorted(model_list, key=str.lower):
        checkpoint_info = CheckpointInfo(filename)
        checkpoint_info.register()


def get_closest_checkpoint_match(search_string):
    checkpoint_info = checkpoint_aliases.get(search_string, None)
    if checkpoint_info is not None:
        return checkpoint_info

    found = sorted([info for info in checkpoints_list.values() if search_string in info.title], key=lambda x: len(x.title))
    if found:
        return found[0]

    return None


get_closet_checkpoint_match = get_closest_checkpoint_match  # backward-compatible alias for the old, misspelled name
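
# Lookup order above: an exact alias hit (name, title, model name, hash, or
# shorthash form) wins outright; otherwise the shortest title containing the
# search string is returned. With hypothetical entries, "v1-5" would match
# "v1-5-pruned-emaonly.safetensors [6ce0161689]" before any longer title
# containing the same substring.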


def model_hash(filename):
    """old hash that only looks at a small part of the file and is prone to collisions"""

    try:
        with open(filename, "rb") as file:
            import hashlib
            m = hashlib.sha256()

            file.seek(0x100000)
            m.update(file.read(0x10000))
            return m.hexdigest()[0:8]
    except FileNotFoundError:
        return 'NOFILE'
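
# The legacy hash above covers only the 64 KiB window at offset 0x100000 (1 MiB),
# i.e. sha256(file[0x100000:0x110000]).hexdigest()[:8], so two checkpoints that
# differ only outside that window collide; CheckpointInfo.calculate_shorthash()
# is the full-file replacement.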


def select_checkpoint():
    model_checkpoint = shared.opts.sd_model_checkpoint

    checkpoint_info = checkpoint_aliases.get(model_checkpoint, None)
    if checkpoint_info is not None:
        return checkpoint_info

    if len(checkpoints_list) == 0:
        print("No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
        if shared.cmd_opts.ckpt is not None:
            print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
        print(f" - directory {model_path}", file=sys.stderr)
        if shared.cmd_opts.ckpt_dir is not None:
            print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
        print("Can't run without a checkpoint. Find and place a .ckpt or .safetensors file into any of those locations. The program will exit.", file=sys.stderr)
        sys.exit(1)

    checkpoint_info = next(iter(checkpoints_list.values()))
    if model_checkpoint is not None:
        print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)

    return checkpoint_info


checkpoint_dict_replacements = {
    'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
    'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
    'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}


def transform_checkpoint_dict_key(k):
    for text, replacement in checkpoint_dict_replacements.items():
        if k.startswith(text):
            k = replacement + k[len(text):]

    return k


def get_state_dict_from_checkpoint(pl_sd):
    pl_sd = pl_sd.pop("state_dict", pl_sd)  # some checkpoints nest the weights under a "state_dict" key
    pl_sd.pop("state_dict", None)  # drop any leftover nested "state_dict" entry so it is not treated as a weight

    sd = {}
    for k, v in pl_sd.items():
        new_key = transform_checkpoint_dict_key(k)

        if new_key is not None:
            sd[new_key] = v

    pl_sd.clear()
    pl_sd.update(sd)

    return pl_sd
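
# Example of the key migration performed above (old CLIP key layout -> the
# text_model layout newer transformers versions expect):
#   'cond_stage_model.transformer.encoder.layers.0.mlp.fc1.weight'
#   -> 'cond_stage_model.transformer.text_model.encoder.layers.0.mlp.fc1.weight'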


def read_metadata_from_safetensors(filename):
    import json

    with open(filename, mode="rb") as file:
        metadata_len = file.read(8)
        metadata_len = int.from_bytes(metadata_len, "little")
        json_start = file.read(2)

        assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file"
        json_data = json_start + file.read(metadata_len - 2)
        json_obj = json.loads(json_data)

        res = {}
        for k, v in json_obj.get("__metadata__", {}).items():
            res[k] = v
            if isinstance(v, str) and v[0:1] == '{':
                try:
                    res[k] = json.loads(v)
                except Exception:
                    pass

        return res
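
# Format this relies on: a safetensors file starts with an 8-byte little-endian
# unsigned header length N, followed by N bytes of JSON; user metadata lives as
# string values under the "__metadata__" key (JSON-looking strings are decoded
# above). A minimal round-trip sketch, assuming a hypothetical tiny.safetensors:
#   safetensors.torch.save_file({"w": torch.zeros(1)}, "tiny.safetensors", metadata={"note": "1"})
#   read_metadata_from_safetensors("tiny.safetensors")  # -> {"note": "1"}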


def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
    _, extension = os.path.splitext(checkpoint_file)
    if extension.lower() == ".safetensors":
        device = map_location or shared.weight_load_location or devices.get_optimal_device_name()
        pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
    else:
        pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)

    if print_global_state and "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")

    sd = get_state_dict_from_checkpoint(pl_sd)
    return sd


def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
    sd_model_hash = checkpoint_info.calculate_shorthash()
    timer.record("calculate hash")

    if checkpoint_info in checkpoints_loaded:
        # use checkpoint cache
        print(f"Loading weights [{sd_model_hash}] from cache")
        return checkpoints_loaded[checkpoint_info]

    print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
    res = read_state_dict(checkpoint_info.filename)
    timer.record("load weights from disk")

    return res


def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
    sd_model_hash = checkpoint_info.calculate_shorthash()
    timer.record("calculate hash")

    shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title

    if state_dict is None:
        state_dict = get_checkpoint_state_dict(checkpoint_info, timer)

    model.load_state_dict(state_dict, strict=False)
    del state_dict
    timer.record("apply weights to model")

    if shared.opts.sd_checkpoint_cache > 0:
        # cache newly loaded model
        checkpoints_loaded[checkpoint_info] = model.state_dict().copy()

    if shared.cmd_opts.opt_channelslast:
        model.to(memory_format=torch.channels_last)
        timer.record("apply channels_last")

    if not shared.cmd_opts.no_half:
        vae = model.first_stage_model
        depth_model = getattr(model, 'depth_model', None)

        # with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
        if shared.cmd_opts.no_half_vae:
            model.first_stage_model = None
        # with --upcast-sampling, don't convert the depth model weights to float16
        if shared.cmd_opts.upcast_sampling and depth_model:
            model.depth_model = None

        model.half()
        model.first_stage_model = vae
        if depth_model:
            model.depth_model = depth_model

        timer.record("apply half()")

    devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
    devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
    devices.dtype_unet = model.model.diffusion_model.dtype
    devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16

    model.first_stage_model.to(devices.dtype_vae)
    timer.record("apply dtype to VAE")

    # clean up cache if limit is reached
    while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
        checkpoints_loaded.popitem(last=False)  # drop the oldest cached state dict (FIFO)

    model.sd_model_hash = sd_model_hash
    model.sd_model_checkpoint = checkpoint_info.filename
    model.sd_checkpoint_info = checkpoint_info
    shared.opts.data["sd_checkpoint_hash"] = checkpoint_info.sha256

    model.logvar = model.logvar.to(devices.device)  # fix for training

    sd_vae.delete_base_vae()
    sd_vae.clear_loaded_vae()
    vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename)
    sd_vae.load_vae(model, vae_file, vae_source)
    timer.record("load VAE")


def enable_midas_autodownload():
    """
    Gives the ldm.modules.midas.api.load_model function automatic downloading.

    When the 512-depth-ema model (or a future model like it) is loaded, it calls
    midas.api.load_model to load the associated midas depth model. This function
    applies a wrapper to download the model to the correct location automatically.
    """

    midas_path = os.path.join(paths.models_path, 'midas')

    # stable-diffusion-stability-ai hard-codes the midas model path to
    # a location that differs from where other scripts using this model look.
    # HACK: Overriding the path here.
    for k, v in midas.api.ISL_PATHS.items():
        file_name = os.path.basename(v)
        midas.api.ISL_PATHS[k] = os.path.join(midas_path, file_name)

    midas_urls = {
        "dpt_large": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        "dpt_hybrid": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_hybrid-midas-501f0c75.pt",
        "midas_v21": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21-f6b98070.pt",
        "midas_v21_small": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21_small-70d6b9c8.pt",
    }

    midas.api.load_model_inner = midas.api.load_model

    def load_model_wrapper(model_type):
        path = midas.api.ISL_PATHS[model_type]
        if not os.path.exists(path):
            if not os.path.exists(midas_path):
                mkdir(midas_path)

            print(f"Downloading midas model weights for {model_type} to {path}")
            request.urlretrieve(midas_urls[model_type], path)
            print(f"{model_type} downloaded")

        return midas.api.load_model_inner(model_type)

    midas.api.load_model = load_model_wrapper
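
# Usage note: once installed, a hypothetical call like midas.api.load_model("dpt_hybrid")
# downloads the weights into models/midas on first use, then defers to the
# original loader saved in midas.api.load_model_inner.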


def repair_config(sd_config):

    if not hasattr(sd_config.model.params, "use_ema"):
        sd_config.model.params.use_ema = False

    if shared.cmd_opts.no_half:
        sd_config.model.params.unet_config.params.use_fp16 = False
    elif shared.cmd_opts.upcast_sampling:
        sd_config.model.params.unet_config.params.use_fp16 = True

    if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available:
        sd_config.model.params.first_stage_config.params.ddconfig.attn_type = "vanilla"

    # For UnCLIP-L, override the hardcoded karlo directory
    if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"):
        karlo_path = os.path.join(paths.models_path, 'karlo')
        sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path)


sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'
sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight'


class SdModelData:
    def __init__(self):
        self.sd_model = None
        self.lock = threading.Lock()

    def get_sd_model(self):
        if self.sd_model is None:
            with self.lock:
                if self.sd_model is not None:  # another thread loaded the model while we waited for the lock
                    return self.sd_model

                try:
                    load_model()
                except Exception as e:
                    errors.display(e, "loading stable diffusion model")
                    print("", file=sys.stderr)
                    print("Stable diffusion model failed to load", file=sys.stderr)
                    self.sd_model = None

        return self.sd_model

    def set_sd_model(self, v):
        self.sd_model = v


model_data = SdModelData()
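
# SdModelData.get_sd_model() is a lazy, double-checked initializer: the unlocked
# None check keeps the common path cheap, and the re-check under the lock stops
# two threads from both calling load_model().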


def load_model(checkpoint_info=None, already_loaded_state_dict=None):
    from modules import lowvram, sd_hijack
    checkpoint_info = checkpoint_info or select_checkpoint()

    if model_data.sd_model:
        sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
        model_data.sd_model = None
        gc.collect()
        devices.torch_gc()

    do_inpainting_hijack()

    timer = Timer()

    if already_loaded_state_dict is not None:
        state_dict = already_loaded_state_dict
    else:
        state_dict = get_checkpoint_state_dict(checkpoint_info, timer)

    checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
    clip_is_included_into_sd = sd1_clip_weight in state_dict or sd2_clip_weight in state_dict

    timer.record("find config")

    sd_config = OmegaConf.load(checkpoint_config)
    repair_config(sd_config)

    timer.record("load config")

    print(f"Creating model from config: {checkpoint_config}")

    sd_model = None
    try:
        with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
            sd_model = instantiate_from_config(sd_config.model)
    except Exception:
        pass

    if sd_model is None:
        print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
        sd_model = instantiate_from_config(sd_config.model)

    sd_model.used_config = checkpoint_config

    timer.record("create model")

    load_model_weights(sd_model, checkpoint_info, state_dict, timer)

    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
        lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
    else:
        sd_model.to(shared.device)

    timer.record("move model to device")

    sd_hijack.model_hijack.hijack(sd_model)

    timer.record("hijack")

    sd_model.eval()
    model_data.sd_model = sd_model

    sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)  # Reload embeddings after model load as they may or may not fit the model

    timer.record("load textual inversion embeddings")

    script_callbacks.model_loaded_callback(sd_model)

    timer.record("scripts callbacks")

    print(f"Model loaded in {timer.summary()}.")

    return sd_model


def reload_model_weights(sd_model=None, info=None):
    from modules import lowvram, devices, sd_hijack
    checkpoint_info = info or select_checkpoint()

    if not sd_model:
        sd_model = model_data.sd_model

    if sd_model is None:  # previous model load failed
        current_checkpoint_info = None
    else:
        current_checkpoint_info = sd_model.sd_checkpoint_info
        if sd_model.sd_model_checkpoint == checkpoint_info.filename:
            return

        if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
            lowvram.send_everything_to_cpu()
        else:
            sd_model.to(devices.cpu)

        sd_hijack.model_hijack.undo_hijack(sd_model)

    timer = Timer()

    state_dict = get_checkpoint_state_dict(checkpoint_info, timer)

    checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)

    timer.record("find config")

    if sd_model is None or checkpoint_config != sd_model.used_config:
        del sd_model
        load_model(checkpoint_info, already_loaded_state_dict=state_dict)
        return model_data.sd_model

    try:
        load_model_weights(sd_model, checkpoint_info, state_dict, timer)
    except Exception:
        print("Failed to load checkpoint, restoring previous")
        load_model_weights(sd_model, current_checkpoint_info, None, timer)
        raise
    finally:
        sd_hijack.model_hijack.hijack(sd_model)
        timer.record("hijack")

        script_callbacks.model_loaded_callback(sd_model)
        timer.record("script callbacks")

        if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
            sd_model.to(devices.device)
            timer.record("move model to device")

    print(f"Weights loaded in {timer.summary()}.")

    return sd_model


def unload_model_weights(sd_model=None, info=None):
    from modules import devices, sd_hijack
    timer = Timer()

    if model_data.sd_model:
        model_data.sd_model.to(devices.cpu)
        sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
        model_data.sd_model = None
        sd_model = None
        gc.collect()
        devices.torch_gc()
        torch.cuda.empty_cache()

    print(f"Unloaded weights in {timer.summary()}.")

    return sd_model


def apply_token_merging(sd_model, token_merging_ratio):
    """
    Applies speed and memory optimizations from tomesd.
    """

    current_token_merging_ratio = getattr(sd_model, 'applied_token_merged_ratio', 0)

    if current_token_merging_ratio == token_merging_ratio:
        return

    if current_token_merging_ratio > 0:
        tomesd.remove_patch(sd_model)

    if token_merging_ratio > 0:
        tomesd.apply_patch(
            sd_model,
            ratio=token_merging_ratio,
            use_rand=False,  # can cause issues with some samplers
            merge_attn=True,
            merge_crossattn=False,
            merge_mlp=False
        )

    sd_model.applied_token_merged_ratio = token_merging_ratio
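
# Usage sketch (hypothetical ratio): apply_token_merging(shared.sd_model, 0.5)
# patches the model with tomesd so roughly half the tokens are merged in
# self-attention; apply_token_merging(shared.sd_model, 0) later undoes the patch.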