# lora_script.py — entry point that wires the Lora extension into the webui.
  1. import torch
  2. import gradio as gr
  3. from fastapi import FastAPI
  4. import lora
  5. import extra_networks_lora
  6. import ui_extra_networks_lora
  7. from modules import script_callbacks, ui_extra_networks, extra_networks, shared
  8. def unload():
  9. torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
  10. torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_lora
  11. torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_lora
  12. torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_lora
  13. torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_lora
  14. torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_lora
  15. def before_ui():
  16. ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
  17. extra_networks.register_extra_network(extra_networks_lora.ExtraNetworkLora())
  18. if not hasattr(torch.nn, 'Linear_forward_before_lora'):
  19. torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward
  20. if not hasattr(torch.nn, 'Linear_load_state_dict_before_lora'):
  21. torch.nn.Linear_load_state_dict_before_lora = torch.nn.Linear._load_from_state_dict
  22. if not hasattr(torch.nn, 'Conv2d_forward_before_lora'):
  23. torch.nn.Conv2d_forward_before_lora = torch.nn.Conv2d.forward
  24. if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_lora'):
  25. torch.nn.Conv2d_load_state_dict_before_lora = torch.nn.Conv2d._load_from_state_dict
  26. if not hasattr(torch.nn, 'MultiheadAttention_forward_before_lora'):
  27. torch.nn.MultiheadAttention_forward_before_lora = torch.nn.MultiheadAttention.forward
  28. if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_lora'):
  29. torch.nn.MultiheadAttention_load_state_dict_before_lora = torch.nn.MultiheadAttention._load_from_state_dict
  30. torch.nn.Linear.forward = lora.lora_Linear_forward
  31. torch.nn.Linear._load_from_state_dict = lora.lora_Linear_load_state_dict
  32. torch.nn.Conv2d.forward = lora.lora_Conv2d_forward
  33. torch.nn.Conv2d._load_from_state_dict = lora.lora_Conv2d_load_state_dict
  34. torch.nn.MultiheadAttention.forward = lora.lora_MultiheadAttention_forward
  35. torch.nn.MultiheadAttention._load_from_state_dict = lora.lora_MultiheadAttention_load_state_dict
  36. script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
  37. script_callbacks.on_script_unloaded(unload)
  38. script_callbacks.on_before_ui(before_ui)
  39. script_callbacks.on_infotext_pasted(lora.infotext_pasted)
  40. shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
  41. "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras),
  42. "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
  43. }))
  44. shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
  45. "lora_functional": shared.OptionInfo(False, "Lora: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
  46. }))
  47. def create_lora_json(obj: lora.LoraOnDisk):
  48. return {
  49. "name": obj.name,
  50. "alias": obj.alias,
  51. "path": obj.filename,
  52. "metadata": obj.metadata,
  53. }
  54. def api_loras(_: gr.Blocks, app: FastAPI):
  55. @app.get("/sdapi/v1/loras")
  56. async def get_loras():
  57. return [create_lora_json(obj) for obj in lora.available_loras.values()]
  58. script_callbacks.on_app_started(api_loras)