# loader.py

import gc
import json
import os
import re
import time
from pathlib import Path
from typing import Optional, List, Dict, Tuple, Union

import torch
import transformers
from accelerate import infer_auto_device_map, init_empty_weights
from peft import PeftModel
from transformers import (AutoConfig, AutoModel, AutoModelForCausalLM,
                          AutoTokenizer, BitsAndBytesConfig, LlamaTokenizer)


class LoaderCheckPoint:
    """
    Load a model from a custom checkpoint.
    """
    # If True, never fetch the model from the remote hub; load from local files only
    no_remote_model: bool = False
    # Model name
    model_name: str = None
    tokenizer: object = None
    # Full path to the model
    model_path: str = None
    model: object = None
    model_config: object = None
    lora_names: list = []
    model_dir: str = None
    lora_dir: str = None
    ptuning_dir: str = None
    use_ptuning_v2: bool = False
    cpu: bool = False
    gpu_memory: object = None
    cpu_memory: object = None
    auto_devices: object = True
    # If the project fails to start with 8-bit quantized loading enabled, pick a
    # bitsandbytes build that matches your CUDA version; see
    # https://github.com/TimDettmers/bitsandbytes/issues/156
    load_in_8bit: bool = False
    is_llamacpp: bool = False
    bf16: bool = False
    params: object = None
    # Custom device map (module name -> device index)
    device_map: Optional[Dict[str, int]] = None
    # Default to cuda; fall back to mps when cuda is unavailable, and to cpu otherwise
    llm_device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

    def __init__(self, params: dict = None):
        """
        Initialize the loader.
        :param params: loader options such as 'model', 'model_dir', 'lora', 'lora_dir',
                       'ptuning_dir', 'use_ptuning_v2', 'no_remote_model', 'cpu',
                       'gpu_memory', 'cpu_memory', 'auto_devices', 'load_in_8bit', 'bf16'
        """
        self.model_path = None
        self.params = params or {}
        self.no_remote_model = self.params.get('no_remote_model', False)
        self.model_name = self.params.get('model', '')
        self.lora = self.params.get('lora', '')
        self.use_ptuning_v2 = self.params.get('use_ptuning_v2', False)
        self.model = None
        self.tokenizer = None
        self.model_dir = self.params.get('model_dir', '')
        self.lora_dir = self.params.get('lora_dir', '')
        self.ptuning_dir = self.params.get('ptuning_dir', '')
        self.cpu = self.params.get('cpu', False)
        self.gpu_memory = self.params.get('gpu_memory', None)
        self.cpu_memory = self.params.get('cpu_memory', None)
        self.auto_devices = self.params.get('auto_devices', True)
        self.load_in_8bit = self.params.get('load_in_8bit', False)
        self.bf16 = self.params.get('bf16', False)

    def _load_model_config(self, model_name):
        checkpoint = Path(f'{self.model_dir}/{model_name}')

        if self.model_path:
            checkpoint = Path(f'{self.model_path}')
        else:
            if not self.no_remote_model:
                checkpoint = model_name

        model_config = AutoConfig.from_pretrained(checkpoint, trust_remote_code=True)
        return model_config

    def _load_model(self, model_name):
        """
        Load the model from a custom location.
        :param model_name:
        :return: (model, tokenizer)
        """
        print(f"Loading {model_name}...")
        t0 = time.time()

        checkpoint = Path(f'{self.model_dir}/{model_name}')
        self.is_llamacpp = len(list(checkpoint.glob('ggml*.bin'))) > 0

        if self.model_path:
            checkpoint = Path(f'{self.model_path}')
        else:
            if not self.no_remote_model:
                checkpoint = model_name

        if 'chatglm' in model_name.lower():
            LoaderClass = AutoModel
        else:
            LoaderClass = AutoModelForCausalLM

        # Load the model in simple 16-bit mode by default
        if not any([self.cpu, self.load_in_8bit, self.auto_devices, self.gpu_memory is not None,
                    self.cpu_memory is not None, self.is_llamacpp]):
            if torch.cuda.is_available() and self.llm_device.lower().startswith("cuda"):
                # Decide between single-GPU and multi-GPU deployment based on the GPU count
                num_gpus = torch.cuda.device_count()
                if num_gpus < 2 and self.device_map is None:
                    model = (
                        LoaderClass.from_pretrained(checkpoint,
                                                    low_cpu_mem_usage=True,
                                                    config=self.model_config,
                                                    torch_dtype=torch.bfloat16 if self.bf16 else torch.float16,
                                                    trust_remote_code=True)
                        .half()
                        .cuda()
                    )
                else:
                    from accelerate import dispatch_model

                    model = LoaderClass.from_pretrained(checkpoint,
                                                        low_cpu_mem_usage=True,
                                                        config=self.model_config,
                                                        torch_dtype=torch.bfloat16 if self.bf16 else torch.float16,
                                                        trust_remote_code=True).half()
                    # A custom device_map can be passed in to control how layers are
                    # placed on each GPU; otherwise one is generated automatically
                    if self.device_map is None:
                        device_map = self.auto_configure_device_map(num_gpus)
                    else:
                        device_map = self.device_map
                    model = dispatch_model(model, device_map=device_map)
            else:
                print(
                    "Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
                model = (
                    AutoModel.from_pretrained(
                        checkpoint,
                        config=self.model_config,
                        trust_remote_code=True)
                    .float()
                    .to(self.llm_device)
                )
        elif self.is_llamacpp:
            from models.extensions.llamacpp_model_alternative import LlamaCppModel

            model_file = list(checkpoint.glob('ggml*.bin'))[0]
            print(f"llama.cpp weights detected: {model_file}\n")

            model, tokenizer = LlamaCppModel.from_pretrained(model_file)
            return model, tokenizer
        # Custom loading (CPU, 8-bit quantization, explicit memory limits, ...)
        else:
            params = {"low_cpu_mem_usage": True}

            if not any((self.cpu, torch.cuda.is_available(), torch.has_mps)):
                print(
                    "Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
                self.cpu = True

            if self.cpu:
                params["torch_dtype"] = torch.float32
            else:
                params["device_map"] = 'auto'
                params["trust_remote_code"] = True
                if self.load_in_8bit and any((self.auto_devices, self.gpu_memory)):
                    params['quantization_config'] = BitsAndBytesConfig(load_in_8bit=True,
                                                                       llm_int8_enable_fp32_cpu_offload=True)
                elif self.load_in_8bit:
                    params['quantization_config'] = BitsAndBytesConfig(load_in_8bit=True)
                elif self.bf16:
                    params["torch_dtype"] = torch.bfloat16
                else:
                    params["torch_dtype"] = torch.float16

                if self.gpu_memory:
                    memory_map = list(map(lambda x: x.strip(), self.gpu_memory))
                    max_cpu_memory = self.cpu_memory.strip() if self.cpu_memory is not None else '99GiB'
                    max_memory = {}
                    for i in range(len(memory_map)):
                        # Append 'GiB' unless the entry already ends in an explicit "...iB" unit
                        max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else \
                            memory_map[i]
                    max_memory['cpu'] = max_cpu_memory
                    params['max_memory'] = max_memory
                elif self.auto_devices:
                    total_mem = (torch.cuda.get_device_properties(0).total_memory / (1024 * 1024))
                    suggestion = round((total_mem - 1000) / 1000) * 1000
                    if total_mem - suggestion < 800:
                        suggestion -= 1000
                    suggestion = int(round(suggestion / 1000))
                    print(
                        f"\033[1;32;1mAuto-assigning --gpu-memory {suggestion} for your GPU to try to prevent out-of-memory errors.\nYou can manually set other values.\033[0;37;0m")
                    max_memory = {0: f'{suggestion}GiB', 'cpu': f'{self.cpu_memory or 99}GiB'}
                    params['max_memory'] = max_memory
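                    # Worked example (illustrative): a GPU reporting 24576 MiB gives
                    # suggestion = round((24576 - 1000) / 1000) * 1000 = 24000; because
                    # 24576 - 24000 = 576 < 800, a further 1000 MiB safety margin is
                    # subtracted, so the final suggestion becomes 23 (i.e. '23GiB').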
                if self.load_in_8bit and params.get('max_memory', None) is not None and params['device_map'] == 'auto':
                    config = AutoConfig.from_pretrained(checkpoint)
                    with init_empty_weights():
                        model = LoaderClass.from_config(config)
                    model.tie_weights()
                    if self.device_map is not None:
                        params['device_map'] = self.device_map
                    else:
                        params['device_map'] = infer_auto_device_map(
                            model,
                            dtype=torch.int8,
                            max_memory=params['max_memory'],
                            no_split_module_classes=model._no_split_modules
                        )

            model = LoaderClass.from_pretrained(checkpoint, **params)

        # Loading the tokenizer
        if type(model) is transformers.LlamaForCausalLM:
            tokenizer = LlamaTokenizer.from_pretrained(checkpoint, clean_up_tokenization_spaces=True)
            # Leaving this here until the LLaMA tokenizer gets figured out.
            # For some people this fixes things, for others it causes an error.
            try:
                tokenizer.eos_token_id = 2
                tokenizer.bos_token_id = 1
                tokenizer.pad_token_id = 0
            except:
                pass
        else:
            tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)

        print(f"Loaded the model in {(time.time() - t0):.2f} seconds.")
        return model, tokenizer

    def auto_configure_device_map(self, num_gpus: int) -> Dict[str, int]:
        # transformer.word_embeddings takes 1 slot
        # transformer.final_layernorm and lm_head together take 1 slot
        # transformer.layers takes 28 slots
        # 30 slots in total, distributed across num_gpus GPUs
        num_trans_layers = 28
        per_gpu_layers = 30 / num_gpus

        # bugfix: on Linux, the weight and input passed to torch.embedding could end up on
        # different devices, causing a RuntimeError
        # on Windows, model.device is set to transformer.word_embeddings.device
        # on Linux, model.device is set to lm_head.device
        # when chat or stream_chat is called, input_ids is placed on model.device;
        # if transformer.word_embeddings.device differs from model.device, a RuntimeError follows
        # so transformer.word_embeddings, transformer.final_layernorm and lm_head are all
        # kept on the first GPU
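        # Illustrative example: with num_gpus=2, per_gpu_layers is 15, so GPU 0 ends up
        # holding word_embeddings, final_layernorm, lm_head and transformer.layers.0-12
        # (2 + 13 = 15 slots), while GPU 1 holds transformer.layers.13-27 (15 slots).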
        device_map = {'transformer.word_embeddings': 0,
                      'transformer.final_layernorm': 0, 'lm_head': 0}

        used = 2
        gpu_target = 0
        for i in range(num_trans_layers):
            if used >= per_gpu_layers:
                gpu_target += 1
                used = 0
            assert gpu_target < num_gpus
            device_map[f'transformer.layers.{i}'] = gpu_target
            used += 1

        return device_map

    def _add_lora_to_model(self, lora_names):
        # LoRAs that are currently loaded
        prior_set = set(self.lora_names)
        # LoRAs that need to be added
        added_set = set(lora_names) - prior_set
        # LoRAs that need to be removed
        removed_set = prior_set - set(lora_names)
        self.lora_names = list(lora_names)

        # Nothing to do = skip.
        if len(added_set) == 0 and len(removed_set) == 0:
            return

        # Only adding, and already peft? Do it the easy way.
        if len(removed_set) == 0 and len(prior_set) > 0:
            print(f"Adding the LoRA(s) named {added_set} to the model...")
            for lora in added_set:
                self.model.load_adapter(Path(f"{self.lora_dir}/{lora}"), lora)
            return

        # If removing anything, disable all and re-add.
        if len(removed_set) > 0:
            self.model.disable_adapter()

        if len(lora_names) > 0:
            print("Applying the following LoRAs to {}: {}".format(self.model_name, ', '.join(lora_names)))
            params = {}
            if not self.cpu:
                params['dtype'] = self.model.dtype
                if hasattr(self.model, "hf_device_map"):
                    params['device_map'] = {"base_model.model." + k: v for k, v in self.model.hf_device_map.items()}
                elif self.load_in_8bit:
                    params['device_map'] = {'': 0}

            self.model.resize_token_embeddings(len(self.tokenizer))
            self.model = PeftModel.from_pretrained(self.model, Path(f"{self.lora_dir}/{lora_names[0]}"), **params)

            for lora in lora_names[1:]:
                self.model.load_adapter(Path(f"{self.lora_dir}/{lora}"), lora)

            if not self.load_in_8bit and not self.cpu:
                if not hasattr(self.model, "hf_device_map"):
                    if torch.has_mps:
                        device = torch.device('mps')
                        self.model = self.model.to(device)
                    else:
                        self.model = self.model.cuda()

    def clear_torch_cache(self):
        gc.collect()
        # Only CUDA exposes explicit cache-clearing hooks; skip this on CPU/MPS
        if not self.cpu and torch.cuda.is_available():
            device_id = "0" if torch.cuda.is_available() else None
            CUDA_DEVICE = f"{self.llm_device}:{device_id}" if device_id else self.llm_device
            with torch.cuda.device(CUDA_DEVICE):
                torch.cuda.empty_cache()
                torch.cuda.ipc_collect()

    def unload_model(self):
        self.model = self.tokenizer = None
        self.clear_torch_cache()

    def set_model_path(self, model_path):
        self.model_path = model_path

    def reload_model(self):
        self.unload_model()

        self.model_config = self._load_model_config(self.model_name)

        if self.use_ptuning_v2:
            try:
                with open(Path(f'{self.ptuning_dir}/config.json'), 'r') as prefix_encoder_file:
                    prefix_encoder_config = json.loads(prefix_encoder_file.read())
                self.model_config.pre_seq_len = prefix_encoder_config['pre_seq_len']
                self.model_config.prefix_projection = prefix_encoder_config['prefix_projection']
            except Exception:
                print("Failed to load the PrefixEncoder config.json")

        self.model, self.tokenizer = self._load_model(self.model_name)

        if self.lora:
            self._add_lora_to_model([self.lora])

        if self.use_ptuning_v2:
            try:
                prefix_state_dict = torch.load(Path(f'{self.ptuning_dir}/pytorch_model.bin'))
                new_prefix_state_dict = {}
                for k, v in prefix_state_dict.items():
                    if k.startswith("transformer.prefix_encoder."):
                        new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
                self.model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
                self.model.transformer.prefix_encoder.float()
            except Exception:
                print("Failed to load the PrefixEncoder model parameters")

        self.model = self.model.eval()
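

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module). The model
# name "chatglm-6b" and the "model" checkpoint directory below are assumptions
# chosen for the example; substitute whatever checkpoint you actually have.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    loader = LoaderCheckPoint({
        'model': 'chatglm-6b',     # hypothetical model name
        'model_dir': 'model',      # hypothetical local checkpoint directory
        'no_remote_model': True,   # only look for local files
        'load_in_8bit': False,
    })
    loader.reload_model()
    print(type(loader.model), type(loader.tokenizer))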