# lora_script.py
  1. import re
  2. import torch
  3. import gradio as gr
  4. from fastapi import FastAPI
  5. import network
  6. import networks
  7. import extra_networks_lora
  8. import ui_extra_networks_lora
  9. from modules import script_callbacks, ui_extra_networks, extra_networks, shared
  10. def unload():
  11. torch.nn.Linear.forward = torch.nn.Linear_forward_before_network
  12. torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_network
  13. torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_network
  14. torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_network
  15. torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_network
  16. torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_network
  17. def before_ui():
  18. ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
  19. extra_networks.register_extra_network(extra_networks_lora.ExtraNetworkLora())
  20. if not hasattr(torch.nn, 'Linear_forward_before_network'):
  21. torch.nn.Linear_forward_before_network = torch.nn.Linear.forward
  22. if not hasattr(torch.nn, 'Linear_load_state_dict_before_network'):
  23. torch.nn.Linear_load_state_dict_before_network = torch.nn.Linear._load_from_state_dict
  24. if not hasattr(torch.nn, 'Conv2d_forward_before_network'):
  25. torch.nn.Conv2d_forward_before_network = torch.nn.Conv2d.forward
  26. if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'):
  27. torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict
  28. if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'):
  29. torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward
  30. if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_network'):
  31. torch.nn.MultiheadAttention_load_state_dict_before_network = torch.nn.MultiheadAttention._load_from_state_dict
  32. torch.nn.Linear.forward = networks.network_Linear_forward
  33. torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict
  34. torch.nn.Conv2d.forward = networks.network_Conv2d_forward
  35. torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict
  36. torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward
  37. torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict
  38. script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
  39. script_callbacks.on_script_unloaded(unload)
  40. script_callbacks.on_before_ui(before_ui)
  41. script_callbacks.on_infotext_pasted(networks.infotext_pasted)
# Settings UI: Lora entries in the "Extra Networks" settings section.
shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
    # Network automatically added to every prompt; dropdown choices are the
    # currently discovered networks, refreshable via list_available_networks.
    "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
    # How a Lora is named when inserted into the prompt text.
    "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
    # Record per-Lora hashes in generation infotext (enables paste-time lookup).
    "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
}))

# Compatibility toggle: slower legacy application path kept for parity with
# the kohya-ss additional-networks extension.
shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
    "lora_functional": shared.OptionInfo(False, "Lora/Networks: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
}))
  50. def create_lora_json(obj: network.NetworkOnDisk):
  51. return {
  52. "name": obj.name,
  53. "alias": obj.alias,
  54. "path": obj.filename,
  55. "metadata": obj.metadata,
  56. }
  57. def api_networks(_: gr.Blocks, app: FastAPI):
  58. @app.get("/sdapi/v1/loras")
  59. async def get_loras():
  60. return [create_lora_json(obj) for obj in networks.available_networks.values()]
  61. @app.post("/sdapi/v1/refresh-loras")
  62. async def refresh_loras():
  63. return networks.list_available_networks()
# Register the REST endpoints once the webui's FastAPI app exists.
script_callbacks.on_app_started(api_networks)
  65. re_lora = re.compile("<lora:([^:]+):")
  66. def infotext_pasted(infotext, d):
  67. hashes = d.get("Lora hashes")
  68. if not hashes:
  69. return
  70. hashes = [x.strip().split(':', 1) for x in hashes.split(",")]
  71. hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}
  72. def network_replacement(m):
  73. alias = m.group(1)
  74. shorthash = hashes.get(alias)
  75. if shorthash is None:
  76. return m.group(0)
  77. network_on_disk = networks.available_network_hash_lookup.get(shorthash)
  78. if network_on_disk is None:
  79. return m.group(0)
  80. return f'<lora:{network_on_disk.get_alias()}:'
  81. d["Prompt"] = re.sub(re_lora, network_replacement, d["Prompt"])
# Also rewrite pasted prompts with the local "Lora hashes"-aware handler above.
script_callbacks.on_infotext_pasted(infotext_pasted)