sd_models.py

import collections
import os.path
import sys
import gc
import threading

import torch
import re
import safetensors.torch
from omegaconf import OmegaConf
from os import mkdir
from urllib import request
import ldm.modules.midas as midas

from ldm.util import instantiate_from_config

from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet
from modules.sd_hijack_inpainting import do_inpainting_hijack
from modules.timer import Timer
import tomesd

model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(paths.models_path, model_dir))

checkpoints_list = {}
checkpoint_alisases = {}
checkpoints_loaded = collections.OrderedDict()


class CheckpointInfo:
    def __init__(self, filename):
        self.filename = filename
        abspath = os.path.abspath(filename)

        if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
            name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
        elif abspath.startswith(model_path):
            name = abspath.replace(model_path, '')
        else:
            name = os.path.basename(filename)

        if name.startswith("\\") or name.startswith("/"):
            name = name[1:]

        self.name = name
        self.name_for_extra = os.path.splitext(os.path.basename(filename))[0]
        self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
        self.hash = model_hash(filename)

        self.sha256 = hashes.sha256_from_cache(self.filename, f"checkpoint/{name}")
        self.shorthash = self.sha256[0:10] if self.sha256 else None

        self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'

        self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else [])

        self.metadata = {}

        _, ext = os.path.splitext(self.filename)
        if ext.lower() == ".safetensors":
            try:
                self.metadata = read_metadata_from_safetensors(filename)
            except Exception as e:
                errors.display(e, f"reading checkpoint metadata: {filename}")

    def register(self):
        checkpoints_list[self.title] = self
        for id in self.ids:
            checkpoint_alisases[id] = self

    def calculate_shorthash(self):
        self.sha256 = hashes.sha256(self.filename, f"checkpoint/{self.name}")
        if self.sha256 is None:
            return

        self.shorthash = self.sha256[0:10]

        if self.shorthash not in self.ids:
            self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]']

        checkpoints_list.pop(self.title)
        self.title = f'{self.name} [{self.shorthash}]'
        self.register()

        return self.shorthash
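

# A minimal usage sketch (illustrative; the path is hypothetical). A CheckpointInfo is built
# from a checkpoint path and then registered, after which it can be looked up under any of
# its aliases (title, bare name, old-style hash, sha256 short hash):
#
#     info = CheckpointInfo("/models/Stable-diffusion/example.safetensors")
#     info.register()
#     assert checkpoint_alisases[info.title] is info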


try:
    # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
    from transformers import logging, CLIPModel  # noqa: F401

    logging.set_verbosity_error()
except Exception:
    pass


def setup_model():
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    enable_midas_autodownload()


def checkpoint_tiles():
    def convert(name):
        return int(name) if name.isdigit() else name.lower()

    def alphanumeric_key(key):
        return [convert(c) for c in re.split('([0-9]+)', key)]

    return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key)
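

# Sorting sketch (illustrative): alphanumeric_key splits runs of digits out of each title so
# numbered checkpoints sort naturally, e.g. "model2" before "model10". The same idea standalone:
#
#     def key(s):
#         return [int(c) if c.isdigit() else c.lower() for c in re.split('([0-9]+)', s)]
#
#     sorted(["model10.ckpt", "model2.ckpt"], key=key)  # -> ['model2.ckpt', 'model10.ckpt']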


def list_models():
    checkpoints_list.clear()
    checkpoint_alisases.clear()

    cmd_ckpt = shared.cmd_opts.ckpt
    if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
        model_url = None
    else:
        model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"

    model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])

    if os.path.exists(cmd_ckpt):
        checkpoint_info = CheckpointInfo(cmd_ckpt)
        checkpoint_info.register()

        shared.opts.data['sd_model_checkpoint'] = checkpoint_info.title
    elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
        print(f"Checkpoint in --ckpt argument not found (possibly it was moved to {model_path}): {cmd_ckpt}", file=sys.stderr)

    for filename in sorted(model_list, key=str.lower):
        checkpoint_info = CheckpointInfo(filename)
        checkpoint_info.register()


def get_closet_checkpoint_match(search_string):
    checkpoint_info = checkpoint_alisases.get(search_string, None)
    if checkpoint_info is not None:
        return checkpoint_info

    found = sorted([info for info in checkpoints_list.values() if search_string in info.title], key=lambda x: len(x.title))
    if found:
        return found[0]

    return None
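

# Lookup sketch (illustrative; names are hypothetical): an exact alias hit is returned first,
# otherwise the checkpoint with the shortest title containing the search string wins:
#
#     get_closet_checkpoint_match("example.ckpt [aabbccddee]")  # exact alias form
#     get_closet_checkpoint_match("example")                    # substring fallback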


def model_hash(filename):
    """old hash that only looks at a small part of the file and is prone to collisions"""

    try:
        with open(filename, "rb") as file:
            import hashlib
            m = hashlib.sha256()

            file.seek(0x100000)
            m.update(file.read(0x10000))
            return m.hexdigest()[0:8]
    except FileNotFoundError:
        return 'NOFILE'
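

# The legacy hash above covers only the 0x10000 bytes starting at offset 0x100000, which is why
# distinct checkpoints can collide. A standalone sketch of the same computation (the filename is
# hypothetical; files shorter than 1 MiB just hash whatever remains to be read):
#
#     import hashlib
#     with open("example.ckpt", "rb") as f:
#         f.seek(0x100000)
#         print(hashlib.sha256(f.read(0x10000)).hexdigest()[:8])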


def select_checkpoint():
    """Raises `FileNotFoundError` if no checkpoints are found."""
    model_checkpoint = shared.opts.sd_model_checkpoint

    checkpoint_info = checkpoint_alisases.get(model_checkpoint, None)
    if checkpoint_info is not None:
        return checkpoint_info

    if len(checkpoints_list) == 0:
        error_message = "No checkpoints found. When searching for checkpoints, looked at:"
        if shared.cmd_opts.ckpt is not None:
            error_message += f"\n - file {os.path.abspath(shared.cmd_opts.ckpt)}"
        error_message += f"\n - directory {model_path}"
        if shared.cmd_opts.ckpt_dir is not None:
            error_message += f"\n - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}"
        error_message += "\nCan't run without a checkpoint. Find and place a .ckpt or .safetensors file into any of those locations."
        raise FileNotFoundError(error_message)

    checkpoint_info = next(iter(checkpoints_list.values()))
    if model_checkpoint is not None:
        print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)

    return checkpoint_info


checkpoint_dict_replacements = {
    'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
    'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
    'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}


def transform_checkpoint_dict_key(k):
    for text, replacement in checkpoint_dict_replacements.items():
        if k.startswith(text):
            k = replacement + k[len(text):]

    return k
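

# Key-renaming sketch (illustrative): older SD1 checkpoints store CLIP weights without the
# "text_model." segment; transform_checkpoint_dict_key rewrites such keys to the newer layout:
#
#     transform_checkpoint_dict_key("cond_stage_model.transformer.encoder.layers.0.mlp.fc1.weight")
#     # -> 'cond_stage_model.transformer.text_model.encoder.layers.0.mlp.fc1.weight'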


def get_state_dict_from_checkpoint(pl_sd):
    pl_sd = pl_sd.pop("state_dict", pl_sd)
    pl_sd.pop("state_dict", None)

    sd = {}
    for k, v in pl_sd.items():
        new_key = transform_checkpoint_dict_key(k)

        if new_key is not None:
            sd[new_key] = v

    pl_sd.clear()
    pl_sd.update(sd)

    return pl_sd


def read_metadata_from_safetensors(filename):
    import json

    with open(filename, mode="rb") as file:
        metadata_len = file.read(8)
        metadata_len = int.from_bytes(metadata_len, "little")
        json_start = file.read(2)

        assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file"
        json_data = json_start + file.read(metadata_len - 2)
        json_obj = json.loads(json_data)

        res = {}
        for k, v in json_obj.get("__metadata__", {}).items():
            res[k] = v
            if isinstance(v, str) and v[0:1] == '{':
                try:
                    res[k] = json.loads(v)
                except Exception:
                    pass

        return res
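

# Format note: a .safetensors file begins with an 8-byte little-endian header length followed by
# that many bytes of JSON; user-supplied metadata sits under the "__metadata__" key. A rough
# standalone sketch of the same read (the filename is hypothetical):
#
#     import json, struct
#     with open("example.safetensors", "rb") as f:
#         (header_len,) = struct.unpack("<Q", f.read(8))
#         metadata = json.loads(f.read(header_len)).get("__metadata__", {})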


def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
    _, extension = os.path.splitext(checkpoint_file)
    if extension.lower() == ".safetensors":
        device = map_location or shared.weight_load_location or devices.get_optimal_device_name()
        pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
    else:
        pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)

    if print_global_state and "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")

    sd = get_state_dict_from_checkpoint(pl_sd)
    return sd


def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
    sd_model_hash = checkpoint_info.calculate_shorthash()
    timer.record("calculate hash")

    if checkpoint_info in checkpoints_loaded:
        # use checkpoint cache
        print(f"Loading weights [{sd_model_hash}] from cache")
        return checkpoints_loaded[checkpoint_info]

    print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
    res = read_state_dict(checkpoint_info.filename)
    timer.record("load weights from disk")

    return res


def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
    sd_model_hash = checkpoint_info.calculate_shorthash()
    timer.record("calculate hash")

    shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title

    if state_dict is None:
        state_dict = get_checkpoint_state_dict(checkpoint_info, timer)

    model.load_state_dict(state_dict, strict=False)
    del state_dict
    timer.record("apply weights to model")

    if shared.opts.sd_checkpoint_cache > 0:
        # cache newly loaded model
        checkpoints_loaded[checkpoint_info] = model.state_dict().copy()

    if shared.cmd_opts.opt_channelslast:
        model.to(memory_format=torch.channels_last)
        timer.record("apply channels_last")

    if not shared.cmd_opts.no_half:
        vae = model.first_stage_model
        depth_model = getattr(model, 'depth_model', None)

        # with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
        if shared.cmd_opts.no_half_vae:
            model.first_stage_model = None
        # with --upcast-sampling, don't convert the depth model weights to float16
        if shared.cmd_opts.upcast_sampling and depth_model:
            model.depth_model = None

        model.half()
        model.first_stage_model = vae
        if depth_model:
            model.depth_model = depth_model

        timer.record("apply half()")

    devices.dtype_unet = model.model.diffusion_model.dtype
    devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16

    model.first_stage_model.to(devices.dtype_vae)
    timer.record("apply dtype to VAE")

    # clean up cache if limit is reached
    while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
        checkpoints_loaded.popitem(last=False)

    model.sd_model_hash = sd_model_hash
    model.sd_model_checkpoint = checkpoint_info.filename
    model.sd_checkpoint_info = checkpoint_info
    shared.opts.data["sd_checkpoint_hash"] = checkpoint_info.sha256

    model.logvar = model.logvar.to(devices.device)  # fix for training

    sd_vae.delete_base_vae()
    sd_vae.clear_loaded_vae()
    vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename)
    sd_vae.load_vae(model, vae_file, vae_source)
    timer.record("load VAE")
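

# Cache note: checkpoints_loaded is an OrderedDict used as a FIFO keyed by CheckpointInfo; once
# it grows past shared.opts.sd_checkpoint_cache entries, the oldest state dict is evicted with
# popitem(last=False). The same pattern in isolation:
#
#     cache = collections.OrderedDict(a=1, b=2, c=3)
#     while len(cache) > 2:
#         cache.popitem(last=False)
#     list(cache)  # -> ['b', 'c']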


def enable_midas_autodownload():
    """
    Gives the ldm.modules.midas.api.load_model function automatic downloading.

    When the 512-depth-ema model, and other future models like it, is loaded,
    it calls midas.api.load_model to load the associated midas depth model.
    This function applies a wrapper to download the model to the correct
    location automatically.
    """

    midas_path = os.path.join(paths.models_path, 'midas')

    # stable-diffusion-stability-ai hard-codes the midas model path to
    # a location that differs from where other scripts using this model look.
    # HACK: Overriding the path here.
    for k, v in midas.api.ISL_PATHS.items():
        file_name = os.path.basename(v)
        midas.api.ISL_PATHS[k] = os.path.join(midas_path, file_name)

    midas_urls = {
        "dpt_large": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        "dpt_hybrid": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_hybrid-midas-501f0c75.pt",
        "midas_v21": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21-f6b98070.pt",
        "midas_v21_small": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21_small-70d6b9c8.pt",
    }

    midas.api.load_model_inner = midas.api.load_model

    def load_model_wrapper(model_type):
        path = midas.api.ISL_PATHS[model_type]
        if not os.path.exists(path):
            if not os.path.exists(midas_path):
                mkdir(midas_path)

            print(f"Downloading midas model weights for {model_type} to {path}")
            request.urlretrieve(midas_urls[model_type], path)
            print(f"{model_type} downloaded")

        return midas.api.load_model_inner(model_type)

    midas.api.load_model = load_model_wrapper


def repair_config(sd_config):
    if not hasattr(sd_config.model.params, "use_ema"):
        sd_config.model.params.use_ema = False

    if shared.cmd_opts.no_half:
        sd_config.model.params.unet_config.params.use_fp16 = False
    elif shared.cmd_opts.upcast_sampling:
        sd_config.model.params.unet_config.params.use_fp16 = True

    if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available:
        sd_config.model.params.first_stage_config.params.ddconfig.attn_type = "vanilla"

    # For UnCLIP-L, override the hardcoded karlo directory
    if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"):
        karlo_path = os.path.join(paths.models_path, 'karlo')
        sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path)


sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'
sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight'


class SdModelData:
    def __init__(self):
        self.sd_model = None
        self.was_loaded_at_least_once = False
        self.lock = threading.Lock()

    def get_sd_model(self):
        if self.was_loaded_at_least_once:
            return self.sd_model

        if self.sd_model is None:
            with self.lock:
                if self.sd_model is not None or self.was_loaded_at_least_once:
                    return self.sd_model

                try:
                    load_model()
                except Exception as e:
                    errors.display(e, "loading stable diffusion model", full_traceback=True)
                    print("", file=sys.stderr)
                    print("Stable diffusion model failed to load", file=sys.stderr)
                    self.sd_model = None

        return self.sd_model

    def set_sd_model(self, v):
        self.sd_model = v


model_data = SdModelData()


def load_model(checkpoint_info=None, already_loaded_state_dict=None):
    from modules import lowvram, sd_hijack
    checkpoint_info = checkpoint_info or select_checkpoint()

    if model_data.sd_model:
        sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
        model_data.sd_model = None
        gc.collect()
        devices.torch_gc()

    do_inpainting_hijack()

    timer = Timer()

    if already_loaded_state_dict is not None:
        state_dict = already_loaded_state_dict
    else:
        state_dict = get_checkpoint_state_dict(checkpoint_info, timer)

    checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
    clip_is_included_into_sd = sd1_clip_weight in state_dict or sd2_clip_weight in state_dict

    timer.record("find config")

    sd_config = OmegaConf.load(checkpoint_config)
    repair_config(sd_config)

    timer.record("load config")

    print(f"Creating model from config: {checkpoint_config}")

    sd_model = None
    try:
        with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
            sd_model = instantiate_from_config(sd_config.model)
    except Exception:
        pass

    if sd_model is None:
        print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
        sd_model = instantiate_from_config(sd_config.model)

    sd_model.used_config = checkpoint_config

    timer.record("create model")

    load_model_weights(sd_model, checkpoint_info, state_dict, timer)

    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
        lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
    else:
        sd_model.to(shared.device)

    timer.record("move model to device")

    sd_hijack.model_hijack.hijack(sd_model)

    timer.record("hijack")

    sd_model.eval()
    model_data.sd_model = sd_model
    model_data.was_loaded_at_least_once = True

    sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)  # Reload embeddings after model load as they may or may not fit the model

    timer.record("load textual inversion embeddings")

    script_callbacks.model_loaded_callback(sd_model)

    timer.record("scripts callbacks")

    with devices.autocast(), torch.no_grad():
        sd_model.cond_stage_model_empty_prompt = sd_model.cond_stage_model([""])

    timer.record("calculate empty prompt")

    print(f"Model loaded in {timer.summary()}.")

    return sd_model


def reload_model_weights(sd_model=None, info=None):
    from modules import lowvram, devices, sd_hijack
    checkpoint_info = info or select_checkpoint()

    if not sd_model:
        sd_model = model_data.sd_model

    if sd_model is None:  # previous model load failed
        current_checkpoint_info = None
    else:
        current_checkpoint_info = sd_model.sd_checkpoint_info
        if sd_model.sd_model_checkpoint == checkpoint_info.filename:
            return

        sd_unet.apply_unet("None")

        if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
            lowvram.send_everything_to_cpu()
        else:
            sd_model.to(devices.cpu)

        sd_hijack.model_hijack.undo_hijack(sd_model)

    timer = Timer()

    state_dict = get_checkpoint_state_dict(checkpoint_info, timer)

    checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)

    timer.record("find config")

    if sd_model is None or checkpoint_config != sd_model.used_config:
        del sd_model
        load_model(checkpoint_info, already_loaded_state_dict=state_dict)
        return model_data.sd_model

    try:
        load_model_weights(sd_model, checkpoint_info, state_dict, timer)
    except Exception:
        print("Failed to load checkpoint, restoring previous")
        load_model_weights(sd_model, current_checkpoint_info, None, timer)
        raise
    finally:
        sd_hijack.model_hijack.hijack(sd_model)
        timer.record("hijack")

        script_callbacks.model_loaded_callback(sd_model)
        timer.record("script callbacks")

        if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
            sd_model.to(devices.device)
            timer.record("move model to device")

    print(f"Weights loaded in {timer.summary()}.")

    return sd_model


def unload_model_weights(sd_model=None, info=None):
    from modules import devices, sd_hijack
    timer = Timer()

    if model_data.sd_model:
        model_data.sd_model.to(devices.cpu)
        sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
        model_data.sd_model = None
        sd_model = None
        gc.collect()
        devices.torch_gc()
        torch.cuda.empty_cache()

    print(f"Unloaded weights {timer.summary()}.")

    return sd_model


def apply_token_merging(sd_model, token_merging_ratio):
    """
    Applies speed and memory optimizations from tomesd.
    """

    current_token_merging_ratio = getattr(sd_model, 'applied_token_merged_ratio', 0)

    if current_token_merging_ratio == token_merging_ratio:
        return

    if current_token_merging_ratio > 0:
        tomesd.remove_patch(sd_model)

    if token_merging_ratio > 0:
        tomesd.apply_patch(
            sd_model,
            ratio=token_merging_ratio,
            use_rand=False,  # can cause issues with some samplers
            merge_attn=True,
            merge_crossattn=False,
            merge_mlp=False
        )

    sd_model.applied_token_merged_ratio = token_merging_ratio
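

# Usage sketch (illustrative): callers pass the live model and a merging ratio between 0 and 1;
# repeating the current ratio is a no-op, and 0 removes an existing patch:
#
#     apply_token_merging(shared.sd_model, 0.5)  # patch attention with tomesd
#     apply_token_merging(shared.sd_model, 0)    # remove the patch again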