import json
import mimetypes
import os
import sys
from functools import reduce
import warnings

import gradio as gr
import gradio.utils
import numpy as np
from PIL import Image, PngImagePlugin  # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call

from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path
from modules.ui_common import create_refresh_button
from modules.ui_gradio_extensions import reload_javascript

from modules.shared import opts, cmd_opts

import modules.codeformer_model
import modules.generation_parameters_copypaste as parameters_copypaste
import modules.gfpgan_model
import modules.hypernetworks.ui
import modules.scripts
import modules.shared as shared
import modules.styles
import modules.textual_inversion.ui
from modules import prompt_parser
from modules.sd_hijack import model_hijack
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.textual_inversion import textual_inversion
import modules.hypernetworks.ui
from modules.generation_parameters_copypaste import image_from_url_text
import modules.extras

create_setting_component = ui_settings.create_setting_component

warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning)

# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')

if not cmd_opts.share and not cmd_opts.listen:
    # fix gradio phoning home
    gradio.utils.version_check = lambda: None
    gradio.utils.get_local_ip_address = lambda: '127.0.0.1'

if cmd_opts.ngrok is not None:
    import modules.ngrok as ngrok
    print('ngrok authtoken detected, trying to connect...')
    ngrok.connect(
        cmd_opts.ngrok,
        cmd_opts.port if cmd_opts.port is not None else 7860,
        cmd_opts.ngrok_options
    )

def gr_show(visible=True):
    return {"visible": visible, "__type__": "update"}


sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None

# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f'  # 🎲️
reuse_symbol = '\u267b\ufe0f'  # ♻️
paste_symbol = '\u2199\ufe0f'  # ↙
refresh_symbol = '\U0001f504'  # 🔄
save_style_symbol = '\U0001f4be'  # 💾
apply_style_symbol = '\U0001f4cb'  # 📋
clear_prompt_symbol = '\U0001f5d1\ufe0f'  # 🗑️
extra_networks_symbol = '\U0001F3B4'  # 🎴
switch_values_symbol = '\U000021C5'  # ⇅
restore_progress_symbol = '\U0001F300'  # 🌀
detect_image_size_symbol = '\U0001F4D0'  # 📐

def plaintext_to_html(text):
    return ui_common.plaintext_to_html(text)


def send_gradio_gallery_to_image(x):
    if len(x) == 0:
        return None
    return image_from_url_text(x[0])


def add_style(name: str, prompt: str, negative_prompt: str):
    if name is None:
        return [gr_show() for x in range(4)]

    style = modules.styles.PromptStyle(name, prompt, negative_prompt)
    shared.prompt_styles.styles[style.name] = style

    # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
    # reserialize all styles every time we save them
    shared.prompt_styles.save_styles(shared.styles_filename)

    return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)]
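

# calc_resolution_hires builds a throwaway StableDiffusionProcessingTxt2Img just to run its
# init() resolution math, then reports the first-pass and upscaled sizes as an HTML snippet
# for the hires-fix preview label.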
def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y):
    from modules import processing, devices

    if not enable:
        return ""

    p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y)

    with devices.autocast():
        p.init([""], [0], [0])

    return f"resize: from <span class='resolution'>{p.width}x{p.height}</span> to <span class='resolution'>{p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}</span>"


def resize_from_to_html(width, height, scale_by):
    target_width = int(width * scale_by)
    target_height = int(height * scale_by)

    if not target_width or not target_height:
        return "no image selected"

    return f"resize: from <span class='resolution'>{width}x{height}</span> to <span class='resolution'>{target_width}x{target_height}</span>"


def apply_styles(prompt, prompt_neg, styles):
    prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
    prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles)

    return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])]
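

# process_interrogate dispatches an interrogation request coming from the img2img tab.
# `mode` mirrors the img2img tab index (0=img2img, 1=sketch, 2=inpaint, 3=inpaint sketch,
# 4=inpaint upload, 5=batch); mode 2 passes only the "image" part of the inpaint editor's
# dict, and mode 5 interrogates every file in a directory and writes one .txt per image.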
def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles):
    if mode in {0, 1, 3, 4}:
        return [interrogation_function(ii_singles[mode]), None]
    elif mode == 2:
        return [interrogation_function(ii_singles[mode]["image"]), None]
    elif mode == 5:
        assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
        images = shared.listfiles(ii_input_dir)
        print(f"Will process {len(images)} images.")
        if ii_output_dir != "":
            os.makedirs(ii_output_dir, exist_ok=True)
        else:
            ii_output_dir = ii_input_dir

        for image in images:
            img = Image.open(image)
            filename = os.path.basename(image)
            left, _ = os.path.splitext(filename)
            print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a'))

        return [gr.update(), None]


def interrogate(image):
    prompt = shared.interrogator.interrogate(image.convert("RGB"))
    return gr.update() if prompt is None else prompt


def interrogate_deepbooru(image):
    prompt = deepbooru.model.tag(image)
    return gr.update() if prompt is None else prompt
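

# create_seed_inputs builds the seed row shared by txt2img and img2img: the main seed box with
# its random/reuse buttons, plus an 'Extra' checkbox that reveals the variation (sub)seed and
# seed-resize controls. It returns the components in the order the generation functions expect.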
def create_seed_inputs(target_interface):
    with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"):
        seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed")
        seed.style(container=False)

        random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed')
        reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed')

        seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False)

    # Components to show/hide based on the 'Extra' checkbox
    seed_extras = []

    with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1:
        seed_extras.append(seed_extra_row_1)
        subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed")
        subseed.style(container=False)
        random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed")
        reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed")
        subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength")

    with FormRow(visible=False) as seed_extra_row_2:
        seed_extras.append(seed_extra_row_2)
        seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w")
        seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h")

    random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[])
    random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[])

    def change_visibility(show):
        return {comp: gr_show(show) for comp in seed_extras}

    seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)

    return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox


def connect_clear_prompt(button):
    """Given clear button, prompt, and token_counter objects, setup clear prompt button click event"""
    button.click(
        _js="clear_prompt",
        fn=None,
        inputs=[],
        outputs=[],
    )

def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
    """ Connects a 'reuse (sub)seed' button's click event so that it copies the last used
        (sub)seed value from generation info to the seed field. If copying subseed and subseed strength
        was 0, i.e. no variation seed was used, it copies the normal seed value instead."""

    def copy_seed(gen_info_string: str, index):
        res = -1

        try:
            gen_info = json.loads(gen_info_string)
            index -= gen_info.get('index_of_first_image', 0)

            if is_subseed and gen_info.get('subseed_strength', 0) > 0:
                all_subseeds = gen_info.get('all_subseeds', [-1])
                res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
            else:
                all_seeds = gen_info.get('all_seeds', [-1])
                res = all_seeds[index if 0 <= index < len(all_seeds) else 0]

        except json.decoder.JSONDecodeError:
            if gen_info_string:
                errors.report(f"Error parsing JSON generation info: {gen_info_string}")

        return [res, gr_show(False)]

    reuse_seed.click(
        fn=copy_seed,
        _js="(x, y) => [x, selected_gallery_index()]",
        show_progress=False,
        inputs=[generation_info, dummy_component],
        outputs=[seed, dummy_component]
    )
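

# update_token_counter runs the prompt through the extra-networks and prompt-schedule parsers,
# then asks the text encoder (via model_hijack.get_prompt_lengths) for the token count of the
# longest scheduled prompt, returning a small HTML fragment such as "75/75" for the counter
# shown next to the prompt box.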
def update_token_counter(text, steps):
    try:
        text, _ = extra_networks.parse_prompt(text)

        _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
        prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)

    except Exception:
        # a parsing error can happen here during typing, and we don't want to bother the user with
        # messages related to it in console
        prompt_schedules = [[[steps, text]]]

    flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
    prompts = [prompt_text for step, prompt_text in flat_prompts]
    token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0])
    return f"<span class='gr-box gr-text-input'>{token_count}/{max_length}</span>"
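

# create_toprow builds the header shared by the txt2img and img2img tabs: prompt and negative
# prompt boxes, the Generate/Interrupt/Skip buttons, the tool-button strip (paste, clear prompt,
# extra networks, styles, restore progress) and the token counters. The interrogate buttons are
# only created when is_img2img is True.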
def create_toprow(is_img2img):
    id_part = "img2img" if is_img2img else "txt2img"

    with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
        with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6):
            with gr.Row():
                with gr.Column(scale=80):
                    with gr.Row():
                        prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])

            with gr.Row():
                with gr.Column(scale=80):
                    with gr.Row():
                        negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])

        button_interrogate = None
        button_deepbooru = None
        if is_img2img:
            with gr.Column(scale=1, elem_classes="interrogate-col"):
                button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
                button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")

        with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"):
            with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"):
                interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt")
                skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip")
                submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')

                skip.click(
                    fn=lambda: shared.state.skip(),
                    inputs=[],
                    outputs=[],
                )

                interrupt.click(
                    fn=lambda: shared.state.interrupt(),
                    inputs=[],
                    outputs=[],
                )

            with gr.Row(elem_id=f"{id_part}_tools"):
                paste = ToolButton(value=paste_symbol, elem_id="paste")
                clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt")
                extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks")
                prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply")
                save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create")
                restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False)

                token_counter = gr.HTML(value="<span>0/75</span>", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"])
                token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
                negative_token_counter = gr.HTML(value="<span>0/75</span>", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"])
                negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button")

                clear_prompt_button.click(
                    fn=lambda *x: x,
                    _js="confirm_clear_prompt",
                    inputs=[prompt, negative_prompt],
                    outputs=[prompt, negative_prompt],
                )

            with gr.Row(elem_id=f"{id_part}_styles_row"):
                prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True)
                create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles")

    return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button


def setup_progressbar(*args, **kwargs):
    pass
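

# apply_setting writes a single settings value (e.g. one coming from pasted generation
# parameters) into opts: it skips frozen settings, hidden components and, when configured,
# checkpoint swaps; otherwise it converts the value to the option's declared type, fires the
# onchange callback and saves the config file.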
def apply_setting(key, value):
    if value is None:
        return gr.update()

    if shared.cmd_opts.freeze_settings:
        return gr.update()

    # don't allow model to be swapped when model hash exists in prompt
    if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap:
        return gr.update()

    if key == "sd_model_checkpoint":
        ckpt_info = sd_models.get_closet_checkpoint_match(value)

        if ckpt_info is not None:
            value = ckpt_info.title
        else:
            return gr.update()

    comp_args = opts.data_labels[key].component_args
    if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
        return

    valtype = type(opts.data_labels[key].default)
    oldval = opts.data.get(key, None)
    opts.data[key] = valtype(value) if valtype != type(None) else value
    if oldval != value and opts.data_labels[key].onchange is not None:
        opts.data_labels[key].onchange()

    opts.save(shared.config_filename)
    return getattr(opts, key)


def create_output_panel(tabname, outdir):
    return ui_common.create_output_panel(tabname, outdir)


def create_sampler_and_steps_selection(choices, tabname):
    if opts.samplers_in_dropdown:
        with FormRow(elem_id=f"sampler_selection_{tabname}"):
            sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
            steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
    else:
        with FormGroup(elem_id=f"sampler_selection_{tabname}"):
            steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
            sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")

    return steps, sampler_index
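

# ordered_ui_categories yields the UI section names ("sampler", "dimensions", "cfg", "seed", ...)
# sorted so that categories listed in the user's ui_reorder option come in that order, while the
# remaining categories roughly keep their default relative positions.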
def ordered_ui_categories():
    user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder)}

    for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
        yield category


def create_override_settings_dropdown(tabname, row):
    dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True)

    dropdown.change(
        fn=lambda x: gr.Dropdown.update(visible=len(x) > 0),
        inputs=[dropdown],
        outputs=[dropdown],
    )

    return dropdown
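

# create_ui assembles every top-level tab of the web UI: txt2img, img2img, extras
# (postprocessing), PNG info, checkpoint merger and the train tab, wiring their components to
# the generation functions and to the parameters copy/paste machinery.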
def create_ui():
    import modules.img2img
    import modules.txt2img

    reload_javascript()

    parameters_copypaste.reset()

    modules.scripts.scripts_current = modules.scripts.scripts_txt2img
    modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False)

    with gr.Blocks(analytics_enabled=False) as txt2img_interface:
        txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False)

        dummy_component = gr.Label(visible=False)
        txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False)

        with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks:
            from modules import ui_extra_networks
            extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img')

        with gr.Row().style(equal_height=False):
            with gr.Column(variant='compact', elem_id="txt2img_settings"):
                modules.scripts.scripts_txt2img.prepare_ui()

                for category in ordered_ui_categories():
                    if category == "sampler":
                        steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")

                    elif category == "dimensions":
                        with FormRow():
                            with gr.Column(elem_id="txt2img_column_size", scale=4):
                                width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
                                height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")

                            with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
                                res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims")

                            if opts.dimensions_and_batch_together:
                                with gr.Column(elem_id="txt2img_column_batch"):
                                    batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
                                    batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")

                    elif category == "cfg":
                        cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")

                    elif category == "seed":
                        seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img')

                    elif category == "checkboxes":
                        with FormRow(elem_classes="checkboxes-row", variant="compact"):
                            restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
                            tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
                            enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
                            hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)

                    elif category == "hires_fix":
                        with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options:
                            with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"):
                                hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
                                hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps")
                                denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")

                            with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"):
                                hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
                                hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
                                hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")

                            with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container:
                                hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index")

                            with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
                                with gr.Column(scale=80):
                                    with gr.Row():
                                        hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
                                with gr.Column(scale=80):
                                    with gr.Row():
                                        hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])

                    elif category == "batch":
                        if not opts.dimensions_and_batch_together:
                            with FormRow(elem_id="txt2img_column_batch"):
                                batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
                                batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")

                    elif category == "override_settings":
                        with FormRow(elem_id="txt2img_override_settings_row") as row:
                            override_settings = create_override_settings_dropdown('txt2img', row)

                    elif category == "scripts":
                        with FormGroup(elem_id="txt2img_script_container"):
                            custom_inputs = modules.scripts.scripts_txt2img.setup_ui()

                    else:
                        modules.scripts.scripts_txt2img.setup_ui_for_section(category)
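
            # Every control that affects the hires-fix output size refreshes the
            # "resize: from ... to ..." preview, once through calc_resolution_hires on the
            # Python side and once through the onCalcResolutionHires JS handler.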
            hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y]

            for component in hr_resolution_preview_inputs:
                event = component.release if isinstance(component, gr.Slider) else component.change

                event(
                    fn=calc_resolution_hires,
                    inputs=hr_resolution_preview_inputs,
                    outputs=[hr_final_resolution],
                    show_progress=False,
                )
                event(
                    None,
                    _js="onCalcResolutionHires",
                    inputs=hr_resolution_preview_inputs,
                    outputs=[],
                    show_progress=False,
                )

            txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)

        connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
        connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)

        txt2img_args = dict(
            fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']),
            _js="submit",
            inputs=[
                dummy_component,
                txt2img_prompt,
                txt2img_negative_prompt,
                txt2img_prompt_styles,
                steps,
                sampler_index,
                restore_faces,
                tiling,
                batch_count,
                batch_size,
                cfg_scale,
                seed,
                subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
                height,
                width,
                enable_hr,
                denoising_strength,
                hr_scale,
                hr_upscaler,
                hr_second_pass_steps,
                hr_resize_x,
                hr_resize_y,
                hr_sampler_index,
                hr_prompt,
                hr_negative_prompt,
                override_settings,
            ] + custom_inputs,
            outputs=[
                txt2img_gallery,
                generation_info,
                html_info,
                html_log,
            ],
            show_progress=False,
        )

        txt2img_prompt.submit(**txt2img_args)
        submit.click(**txt2img_args)

        res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False)

        restore_progress_button.click(
            fn=progress.restore_progress,
            _js="restoreProgressTxt2img",
            inputs=[dummy_component],
            outputs=[
                txt2img_gallery,
                generation_info,
                html_info,
                html_log,
            ],
            show_progress=False,
        )

        txt_prompt_img.change(
            fn=modules.images.image_data,
            inputs=[
                txt_prompt_img
            ],
            outputs=[
                txt2img_prompt,
                txt_prompt_img
            ],
            show_progress=False,
        )

        enable_hr.change(
            fn=lambda x: gr_show(x),
            inputs=[enable_hr],
            outputs=[hr_options],
            show_progress=False,
        )

        txt2img_paste_fields = [
            (txt2img_prompt, "Prompt"),
            (txt2img_negative_prompt, "Negative prompt"),
            (steps, "Steps"),
            (sampler_index, "Sampler"),
            (restore_faces, "Face restoration"),
            (cfg_scale, "CFG scale"),
            (seed, "Seed"),
            (width, "Size-1"),
            (height, "Size-2"),
            (batch_size, "Batch size"),
            (subseed, "Variation seed"),
            (subseed_strength, "Variation seed strength"),
            (seed_resize_from_w, "Seed resize from-1"),
            (seed_resize_from_h, "Seed resize from-2"),
            (denoising_strength, "Denoising strength"),
            (enable_hr, lambda d: "Denoising strength" in d),
            (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
            (hr_scale, "Hires upscale"),
            (hr_upscaler, "Hires upscaler"),
            (hr_second_pass_steps, "Hires steps"),
            (hr_resize_x, "Hires resize-1"),
            (hr_resize_y, "Hires resize-2"),
            (hr_sampler_index, "Hires sampler"),
            (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()),
            (hr_prompt, "Hires prompt"),
            (hr_negative_prompt, "Hires negative prompt"),
            (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
            *modules.scripts.scripts_txt2img.infotext_fields
        ]
        parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings)
        parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
            paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None,
        ))

        txt2img_preview_params = [
            txt2img_prompt,
            txt2img_negative_prompt,
            steps,
            sampler_index,
            cfg_scale,
            seed,
            width,
            height,
        ]

        token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter])
        negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter])

        ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery)
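
    # The img2img tab mirrors txt2img but adds the image-input tabs (img2img, sketch, inpaint,
    # inpaint sketch, inpaint upload, batch) plus resize and inpainting controls.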
    modules.scripts.scripts_current = modules.scripts.scripts_img2img
    modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True)

    with gr.Blocks(analytics_enabled=False) as img2img_interface:
        img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True)

        img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False)

        with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks:
            from modules import ui_extra_networks
            extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img')

        with FormRow().style(equal_height=False):
            with gr.Column(variant='compact', elem_id="img2img_settings"):
                copy_image_buttons = []
                copy_image_destinations = {}

                def add_copy_image_controls(tab_name, elem):
                    with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"):
                        gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}")

                        for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']):
                            if name == tab_name:
                                gr.Button(title, interactive=False)
                                copy_image_destinations[name] = elem
                                continue

                            button = gr.Button(title)
                            copy_image_buttons.append((button, name, elem))

                with gr.Tabs(elem_id="mode_img2img"):
                    img2img_selected_tab = gr.State(0)

                    with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
                        init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height)
                        add_copy_image_controls('img2img', init_img)

                    with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
                        sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height)
                        add_copy_image_controls('sketch', sketch)

                    with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
                        init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height)
                        add_copy_image_controls('inpaint', init_img_with_mask)

                    with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
                        inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height)
                        inpaint_color_sketch_orig = gr.State(None)
                        add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)

                        def update_orig(image, state):
                            if image is not None:
                                same_size = state is not None and state.size == image.size
                                has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
                                edited = same_size and has_exact_match
                                return image if not edited or state is None else state

                        inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig)

                    with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
                        init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
                        init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask")

                    with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
                        hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
                        gr.HTML(
                            "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
                            "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
                            f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
                            f"{hidden}</p>"
                        )
                        img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
                        img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
                        img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")

                    img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]

                    for i, tab in enumerate(img2img_tabs):
                        tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab])

                def copy_image(img):
                    if isinstance(img, dict) and 'image' in img:
                        return img['image']

                    return img

                for button, name, elem in copy_image_buttons:
                    button.click(
                        fn=copy_image,
                        inputs=[elem],
                        outputs=[copy_image_destinations[name]],
                    )
                    button.click(
                        fn=lambda: None,
                        _js=f"switch_to_{name.replace(' ', '_')}",
                        inputs=[],
                        outputs=[],
                    )

                with FormRow():
                    resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")

                modules.scripts.scripts_img2img.prepare_ui()

                for category in ordered_ui_categories():
                    if category == "sampler":
                        steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img")

                    elif category == "dimensions":
                        with FormRow():
                            with gr.Column(elem_id="img2img_column_size", scale=4):
                                selected_scale_tab = gr.State(value=0)

                                with gr.Tabs():
                                    with gr.Tab(label="Resize to") as tab_scale_to:
                                        with FormRow():
                                            with gr.Column(elem_id="img2img_column_size", scale=4):
                                                width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
                                                height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
                                            with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
                                                res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
                                                detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn")

                                    with gr.Tab(label="Resize by") as tab_scale_by:
                                        scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale")

                                        with FormRow():
                                            scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview")
                                            gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider")
                                            button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to")

                                        on_change_args = dict(
                                            fn=resize_from_to_html,
                                            _js="currentImg2imgSourceResolution",
                                            inputs=[dummy_component, dummy_component, scale_by],
                                            outputs=scale_by_html,
                                            show_progress=False,
                                        )

                                        scale_by.release(**on_change_args)
                                        button_update_resize_to.click(**on_change_args)

                                    # the code below is meant to update the resolution label after the image in the image selection UI has changed.
                                    # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests.
                                    # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs.
                                    for component in [init_img, sketch]:
                                        component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False)

                                tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab])
                                tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab])

                            if opts.dimensions_and_batch_together:
                                with gr.Column(elem_id="img2img_column_batch"):
                                    batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
                                    batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")

                    elif category == "cfg":
                        with FormGroup():
                            with FormRow():
                                cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
                                image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False)
                            denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")

                    elif category == "seed":
                        seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')

                    elif category == "checkboxes":
                        with FormRow(elem_classes="checkboxes-row", variant="compact"):
                            restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
                            tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")

                    elif category == "batch":
                        if not opts.dimensions_and_batch_together:
                            with FormRow(elem_id="img2img_column_batch"):
                                batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
                                batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")

                    elif category == "override_settings":
                        with FormRow(elem_id="img2img_override_settings_row") as row:
                            override_settings = create_override_settings_dropdown('img2img', row)

                    elif category == "scripts":
                        with FormGroup(elem_id="img2img_script_container"):
                            custom_inputs = modules.scripts.scripts_img2img.setup_ui()

                    elif category == "inpaint":
                        with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls:
                            with FormRow():
                                mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur")
                                mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha")

                            with FormRow():
                                inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode")

                            with FormRow():
                                inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill")

                            with FormRow():
                                with gr.Column():
                                    inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res")

                                with gr.Column(scale=4):
                                    inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding")

                            def select_img2img_tab(tab):
                                return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3),

                            for i, elem in enumerate(img2img_tabs):
                                elem.select(
                                    fn=lambda tab=i: select_img2img_tab(tab),
                                    inputs=[],
                                    outputs=[inpaint_controls, mask_alpha],
                                )

                    else:
                        modules.scripts.scripts_img2img.setup_ui_for_section(category)

            img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)

        connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
        connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)

        img2img_prompt_img.change(
            fn=modules.images.image_data,
            inputs=[
                img2img_prompt_img
            ],
            outputs=[
                img2img_prompt,
                img2img_prompt_img
            ],
            show_progress=False,
        )

        img2img_args = dict(
            fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']),
            _js="submit_img2img",
            inputs=[
                dummy_component,
                dummy_component,
                img2img_prompt,
                img2img_negative_prompt,
                img2img_prompt_styles,
                init_img,
                sketch,
                init_img_with_mask,
                inpaint_color_sketch,
                inpaint_color_sketch_orig,
                init_img_inpaint,
                init_mask_inpaint,
                steps,
                sampler_index,
                mask_blur,
                mask_alpha,
                inpainting_fill,
                restore_faces,
                tiling,
                batch_count,
                batch_size,
                cfg_scale,
                image_cfg_scale,
                denoising_strength,
                seed,
                subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
                selected_scale_tab,
                height,
                width,
                scale_by,
                resize_mode,
                inpaint_full_res,
                inpaint_full_res_padding,
                inpainting_mask_invert,
                img2img_batch_input_dir,
                img2img_batch_output_dir,
                img2img_batch_inpaint_mask_dir,
                override_settings,
            ] + custom_inputs,
            outputs=[
                img2img_gallery,
                generation_info,
                html_info,
                html_log,
            ],
            show_progress=False,
        )

        interrogate_args = dict(
            _js="get_img2img_tab_index",
            inputs=[
                dummy_component,
                img2img_batch_input_dir,
                img2img_batch_output_dir,
                init_img,
                sketch,
                init_img_with_mask,
                inpaint_color_sketch,
                init_img_inpaint,
            ],
            outputs=[img2img_prompt, dummy_component],
        )

        img2img_prompt.submit(**img2img_args)
        submit.click(**img2img_args)

        res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False)

        detect_image_size_btn.click(
            fn=lambda w, h, _: (w or gr.update(), h or gr.update()),
            _js="currentImg2imgSourceResolution",
            inputs=[dummy_component, dummy_component, dummy_component],
            outputs=[width, height],
            show_progress=False,
        )

        restore_progress_button.click(
            fn=progress.restore_progress,
            _js="restoreProgressImg2img",
            inputs=[dummy_component],
            outputs=[
                img2img_gallery,
                generation_info,
                html_info,
                html_log,
            ],
            show_progress=False,
        )

        img2img_interrogate.click(
            fn=lambda *args: process_interrogate(interrogate, *args),
            **interrogate_args,
        )

        img2img_deepbooru.click(
            fn=lambda *args: process_interrogate(interrogate_deepbooru, *args),
            **interrogate_args,
        )

        prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
        style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles]
        style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]

        for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
            button.click(
                fn=add_style,
                _js="ask_for_style_name",
                # Have to pass empty dummy component here, because the JavaScript and Python function have to accept
                # the same number of parameters, but we only know the style-name after the JavaScript prompt
                inputs=[dummy_component, prompt, negative_prompt],
                outputs=[txt2img_prompt_styles, img2img_prompt_styles],
            )

        for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
            button.click(
                fn=apply_styles,
                _js=js_func,
                inputs=[prompt, negative_prompt, styles],
                outputs=[prompt, negative_prompt, styles],
            )

        token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
        negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter])

        ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)

        img2img_paste_fields = [
            (img2img_prompt, "Prompt"),
            (img2img_negative_prompt, "Negative prompt"),
            (steps, "Steps"),
            (sampler_index, "Sampler"),
            (restore_faces, "Face restoration"),
            (cfg_scale, "CFG scale"),
            (image_cfg_scale, "Image CFG scale"),
            (seed, "Seed"),
            (width, "Size-1"),
            (height, "Size-2"),
            (batch_size, "Batch size"),
            (subseed, "Variation seed"),
            (subseed_strength, "Variation seed strength"),
            (seed_resize_from_w, "Seed resize from-1"),
            (seed_resize_from_h, "Seed resize from-2"),
            (denoising_strength, "Denoising strength"),
            (mask_blur, "Mask blur"),
            *modules.scripts.scripts_img2img.infotext_fields
        ]
        parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings)
        parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings)
        parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
            paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None,
        ))

    modules.scripts.scripts_current = None
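
    # Remaining tabs: postprocessing (extras), PNG info, checkpoint merger and training.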
    with gr.Blocks(analytics_enabled=False) as extras_interface:
        ui_postprocessing.create_ui()

    with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
        with gr.Row().style(equal_height=False):
            with gr.Column(variant='panel'):
                image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")

            with gr.Column(variant='panel'):
                html = gr.HTML()
                generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info")
                html2 = gr.HTML()

                with gr.Row():
                    buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])

                for tabname, button in buttons.items():
                    parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
                        paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image,
                    ))

        image.change(
            fn=wrap_gradio_call(modules.extras.run_pnginfo),
            inputs=[image],
            outputs=[html, generation_info, html2],
        )

    def update_interp_description(value):
        interp_description_css = "<p style='margin-bottom: 2.5em'>{}</p>"
        interp_descriptions = {
            "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."),
            "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"),
            "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M")
        }
        return interp_descriptions[value]
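
    # Checkpoint merger tab: pick up to three checkpoints (A, B, C), an interpolation method
    # and multiplier M, plus output format, VAE baking and weight-discard options.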
    with gr.Blocks(analytics_enabled=False) as modelmerger_interface:
        with gr.Row().style(equal_height=False):
            with gr.Column(variant='compact'):
                interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description")

                with FormRow(elem_id="modelmerger_models"):
                    primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)")
                    create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A")

                    secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)")
                    create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B")

                    tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)")
                    create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C")

                custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name")
                interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount")
                interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method")
                interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description])

                with FormRow():
                    checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format")
                    save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half")
                    save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata")

                with FormRow():
                    with gr.Column():
                        config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method")

                    with gr.Column():
                        with FormRow():
                            bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae")
                            create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae")

                with FormRow():
                    discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights")

                with gr.Row():
                    modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary')

            with gr.Column(variant='compact', elem_id="modelmerger_results_container"):
                with gr.Group(elem_id="modelmerger_results_panel"):
                    modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False)
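    # Train tab: create embeddings and hypernetworks, preprocess images, and run textual inversion / hypernetwork training.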
    with gr.Blocks(analytics_enabled=False) as train_interface:
        with gr.Row().style(equal_height=False):
            gr.HTML(value="<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>")

        with gr.Row(variant="compact").style(equal_height=False):
            with gr.Tabs(elem_id="train_tabs"):

                with gr.Tab(label="Create embedding", id="create_embedding"):
                    new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name")
                    initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text")
                    nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt")
                    overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding")

                    with gr.Row():
                        with gr.Column(scale=3):
                            gr.HTML(value="")

                        with gr.Column():
                            create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding")

                with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"):
                    new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name")
                    new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes")
                    new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure")
                    new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func")
                    new_hypernetwork_initialization_option = gr.Dropdown(value="Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option")
                    new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm")
                    new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout")
                    new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. ex:'0, 0.01, 0'")
                    overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork")

                    with gr.Row():
                        with gr.Column(scale=3):
                            gr.HTML(value="")

                        with gr.Column():
                            create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
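                # Preprocess images tab: resize, crop, flip and caption images from a source directory into a destination directory for training.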
                with gr.Tab(label="Preprocess images", id="preprocess_images"):
                    process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
                    process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
                    process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
                    process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
                    preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")

                    with gr.Row():
                        process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size")
                        process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
                        process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
                        process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
                        process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
                        process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
                        process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")

                    with gr.Row(visible=False) as process_split_extra_row:
                        process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
                        process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")

                    with gr.Row(visible=False) as process_focal_crop_row:
                        process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
                        process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
                        process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
                        process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")

                    with gr.Column(visible=False) as process_multicrop_col:
                        gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
                        with gr.Row():
                            process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
                            process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
                        with gr.Row():
                            process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
                            process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
                        with gr.Row():
                            process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
                            process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")

                    with gr.Row():
                        with gr.Column(scale=3):
                            gr.HTML(value="")

                        with gr.Column():
                            with gr.Row():
                                interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
                                run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")

                    process_split.change(
                        fn=lambda show: gr_show(show),
                        inputs=[process_split],
                        outputs=[process_split_extra_row],
                    )

                    process_focal_crop.change(
                        fn=lambda show: gr_show(show),
                        inputs=[process_focal_crop],
                        outputs=[process_focal_crop_row],
                    )

                    process_multicrop.change(
                        fn=lambda show: gr_show(show),
                        inputs=[process_multicrop],
                        outputs=[process_multicrop_col],
                    )
                def get_textual_inversion_template_names():
                    return sorted(textual_inversion.textual_inversion_templates)
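                # Train tab proper: training parameters shared by embedding and hypernetwork training.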
                with gr.Tab(label="Train", id="train"):
                    gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")
                    with FormRow():
                        train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
                        create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")

                        train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks))
                        create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name")

                    with FormRow():
                        embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
                        hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")

                    with FormRow():
                        clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
                        clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)

                    with FormRow():
                        batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
                        gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")

                    dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
                    log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")

                    with FormRow():
                        template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names())
                        create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file")

                    training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
                    training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
                    varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize")
                    steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")

                    with FormRow():
                        create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
                        save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")

                    use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")

                    save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
                    preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")

                    shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
                    tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")

                    latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method")

                    with gr.Row():
                        train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding")
                        interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training")
                        train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork")

                params = script_callbacks.UiTrainTabParams(txt2img_preview_params)

                script_callbacks.ui_train_tabs_callback(params)

            with gr.Column(elem_id='ti_gallery_container'):
                ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
                gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
                gr.HTML(elem_id="ti_progress", value="")
                ti_outcome = gr.HTML(elem_id="ti_error", value="")
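        # Wire the create/preprocess/train buttons to their backend functions; long-running jobs go through wrap_gradio_gpu_call.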
        create_embedding.click(
            fn=modules.textual_inversion.ui.create_embedding,
            inputs=[
                new_embedding_name,
                initialization_text,
                nvpt,
                overwrite_old_embedding,
            ],
            outputs=[
                train_embedding_name,
                ti_output,
                ti_outcome,
            ]
        )

        create_hypernetwork.click(
            fn=modules.hypernetworks.ui.create_hypernetwork,
            inputs=[
                new_hypernetwork_name,
                new_hypernetwork_sizes,
                overwrite_old_hypernetwork,
                new_hypernetwork_layer_structure,
                new_hypernetwork_activation_func,
                new_hypernetwork_initialization_option,
                new_hypernetwork_add_layer_norm,
                new_hypernetwork_use_dropout,
                new_hypernetwork_dropout_structure
            ],
            outputs=[
                train_hypernetwork_name,
                ti_output,
                ti_outcome,
            ]
        )

        run_preprocess.click(
            fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
            _js="start_training_textual_inversion",
            inputs=[
                dummy_component,
                process_src,
                process_dst,
                process_width,
                process_height,
                preprocess_txt_action,
                process_keep_original_size,
                process_flip,
                process_split,
                process_caption,
                process_caption_deepbooru,
                process_split_threshold,
                process_overlap_ratio,
                process_focal_crop,
                process_focal_crop_face_weight,
                process_focal_crop_entropy_weight,
                process_focal_crop_edges_weight,
                process_focal_crop_debug,
                process_multicrop,
                process_multicrop_mindim,
                process_multicrop_maxdim,
                process_multicrop_minarea,
                process_multicrop_maxarea,
                process_multicrop_objective,
                process_multicrop_threshold,
            ],
            outputs=[
                ti_output,
                ti_outcome,
            ],
        )

        train_embedding.click(
            fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
            _js="start_training_textual_inversion",
            inputs=[
                dummy_component,
                train_embedding_name,
                embedding_learn_rate,
                batch_size,
                gradient_step,
                dataset_directory,
                log_directory,
                training_width,
                training_height,
                varsize,
                steps,
                clip_grad_mode,
                clip_grad_value,
                shuffle_tags,
                tag_drop_out,
                latent_sampling_method,
                use_weight,
                create_image_every,
                save_embedding_every,
                template_file,
                save_image_with_stored_embedding,
                preview_from_txt2img,
                *txt2img_preview_params,
            ],
            outputs=[
                ti_output,
                ti_outcome,
            ]
        )

        train_hypernetwork.click(
            fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]),
            _js="start_training_textual_inversion",
            inputs=[
                dummy_component,
                train_hypernetwork_name,
                hypernetwork_learn_rate,
                batch_size,
                gradient_step,
                dataset_directory,
                log_directory,
                training_width,
                training_height,
                varsize,
                steps,
                clip_grad_mode,
                clip_grad_value,
                shuffle_tags,
                tag_drop_out,
                latent_sampling_method,
                use_weight,
                create_image_every,
                save_embedding_every,
                template_file,
                preview_from_txt2img,
                *txt2img_preview_params,
            ],
            outputs=[
                ti_output,
                ti_outcome,
            ]
        )

        interrupt_training.click(
            fn=lambda: shared.state.interrupt(),
            inputs=[],
            outputs=[],
        )

        interrupt_preprocessing.click(
            fn=lambda: shared.state.interrupt(),
            inputs=[],
            outputs=[],
        )
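    # UI value persistence (ui-config.json), the Settings tab, and the list of top-level tab interfaces; extensions and script callbacks can append their own tabs.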
    loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file)

    settings = ui_settings.UiSettings()
    settings.create_ui(loadsave, dummy_component)

    interfaces = [
        (txt2img_interface, "txt2img", "txt2img"),
        (img2img_interface, "img2img", "img2img"),
        (extras_interface, "Extras", "extras"),
        (pnginfo_interface, "PNG Info", "pnginfo"),
        (modelmerger_interface, "Checkpoint Merger", "modelmerger"),
        (train_interface, "Train", "train"),
    ]

    interfaces += script_callbacks.ui_tabs_callback()
    interfaces += [(settings.interface, "Settings", "settings")]

    extensions_interface = ui_extensions.create_ui()
    interfaces += [(extensions_interface, "Extensions", "extensions")]

    shared.tab_names = []
    for _interface, label, _ifid in interfaces:
        shared.tab_names.append(label)
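    # Top-level Blocks that hosts the quicksettings row, all tabs, the notification sound and the footer.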
    with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo:
        settings.add_quicksettings()

        parameters_copypaste.connect_paste_params_buttons()

        with gr.Tabs(elem_id="tabs") as tabs:
            tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)}
            sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999))

            for interface, label, ifid in sorted_interfaces:
                if label in shared.opts.hidden_tabs:
                    continue
                with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"):
                    interface.render()

            for interface, _label, ifid in interfaces:
                if ifid in ["extensions", "settings"]:
                    continue

                loadsave.add_block(interface, ifid)

            loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs)

            loadsave.setup_ui()

        if os.path.exists(os.path.join(script_path, "notification.mp3")):
            gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)

        footer = shared.html("footer.html")
        footer = footer.format(versions=versions_html())
        gr.HTML(footer, elem_id="footer")

        settings.add_functionality(demo)

        update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
        settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
        demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
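        # Wrapper around run_modelmerger that reports errors and refreshes the checkpoint dropdowns instead of raising.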
        def modelmerger(*args):
            try:
                results = modules.extras.run_modelmerger(*args)
            except Exception as e:
                errors.report("Error loading/saving model file", exc_info=True)
                modules.sd_models.list_models()  # to remove the potentially missing models from the list
                return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
            return results

        modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result])
        modelmerger_merge.click(
            fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]),
            _js='modelmerger',
            inputs=[
                dummy_component,
                primary_model_name,
                secondary_model_name,
                tertiary_model_name,
                interp_method,
                interp_amount,
                save_as_half,
                custom_name,
                checkpoint_format,
                config_source,
                bake_in_vae,
                discard_weights,
                save_metadata,
            ],
            outputs=[
                primary_model_name,
                secondary_model_name,
                tertiary_model_name,
                settings.component_dict['sd_model_checkpoint'],
                modelmerger_result,
            ]
        )

    loadsave.dump_defaults()
    demo.ui_loadsave = loadsave

    # Required as a workaround for change() event not triggering when loading values from ui-config.json
    interp_description.value = update_interp_description(interp_method.value)

    return demo
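# Version summary (webui, python, torch, xformers, gradio, checkpoint hash) rendered in the page footer.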
def versions_html():
    import torch
    import launch

    python_version = ".".join([str(x) for x in sys.version_info[0:3]])
    commit = launch.commit_hash()
    tag = launch.git_tag()

    if shared.xformers_available:
        import xformers
        xformers_version = xformers.__version__
    else:
        xformers_version = "N/A"

    return f"""
version: <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/{commit}">{tag}</a>
&#x2000;•&#x2000;
python: <span title="{sys.version}">{python_version}</span>
&#x2000;•&#x2000;
torch: {getattr(torch, '__long_version__', torch.__version__)}
&#x2000;•&#x2000;
xformers: {xformers_version}
&#x2000;•&#x2000;
gradio: {gr.__version__}
&#x2000;•&#x2000;
checkpoint: <a id="sd_checkpoint_hash">N/A</a>
"""
def setup_ui_api(app):
    from pydantic import BaseModel, Field
    from typing import List

    class QuicksettingsHint(BaseModel):
        name: str = Field(title="Name of the quicksettings field")
        label: str = Field(title="Label of the quicksettings field")

    def quicksettings_hint():
        return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()]

    app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint])

    app.add_api_route("/internal/ping", lambda: {}, methods=["GET"])