# txt2img.py
import json
from contextlib import closing
import modules.scripts
from modules import processing, infotext_utils
from modules.infotext_utils import create_override_settings_dict, parse_generation_parameters
from modules.shared import opts
import modules.shared as shared
from modules.ui import plaintext_to_html
from PIL import Image
import gradio as gr
  11. def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_scheduler: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args, force_enable_hr=False):
  12. override_settings = create_override_settings_dict(override_settings_texts)
  13. if force_enable_hr:
  14. enable_hr = True
  15. p = processing.StableDiffusionProcessingTxt2Img(
  16. sd_model=shared.sd_model,
  17. outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
  18. outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids,
  19. prompt=prompt,
  20. styles=prompt_styles,
  21. negative_prompt=negative_prompt,
  22. batch_size=batch_size,
  23. n_iter=n_iter,
  24. cfg_scale=cfg_scale,
  25. width=width,
  26. height=height,
  27. enable_hr=enable_hr,
  28. denoising_strength=denoising_strength,
  29. hr_scale=hr_scale,
  30. hr_upscaler=hr_upscaler,
  31. hr_second_pass_steps=hr_second_pass_steps,
  32. hr_resize_x=hr_resize_x,
  33. hr_resize_y=hr_resize_y,
  34. hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name,
  35. hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name,
  36. hr_scheduler=None if hr_scheduler == 'Use same scheduler' else hr_scheduler,
  37. hr_prompt=hr_prompt,
  38. hr_negative_prompt=hr_negative_prompt,
  39. override_settings=override_settings,
  40. )
  41. p.scripts = modules.scripts.scripts_txt2img
  42. p.script_args = args
  43. p.user = request.username
  44. if shared.opts.enable_console_prompts:
  45. print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
  46. return p
  47. def txt2img_upscale(id_task: str, request: gr.Request, gallery, gallery_index, generation_info, *args):
  48. assert len(gallery) > 0, 'No image to upscale'
  49. assert 0 <= gallery_index < len(gallery), f'Bad image index: {gallery_index}'
  50. p = txt2img_create_processing(id_task, request, *args, force_enable_hr=True)
  51. p.batch_size = 1
  52. p.n_iter = 1
  53. # txt2img_upscale attribute that signifies this is called by txt2img_upscale
  54. p.txt2img_upscale = True
  55. geninfo = json.loads(generation_info)
  56. image_info = gallery[gallery_index] if 0 <= gallery_index < len(gallery) else gallery[0]
  57. p.firstpass_image = infotext_utils.image_from_url_text(image_info)
  58. parameters = parse_generation_parameters(geninfo.get('infotexts')[gallery_index], [])
  59. p.seed = parameters.get('Seed', -1)
  60. p.subseed = parameters.get('Variation seed', -1)
  61. p.override_settings['save_images_before_highres_fix'] = False
  62. with closing(p):
  63. processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)
  64. if processed is None:
  65. processed = processing.process_images(p)
  66. shared.total_tqdm.clear()
  67. new_gallery = []
  68. for i, image in enumerate(gallery):
  69. if i == gallery_index:
  70. geninfo["infotexts"][gallery_index: gallery_index+1] = processed.infotexts
  71. new_gallery.extend(processed.images)
  72. else:
  73. fake_image = Image.new(mode="RGB", size=(1, 1))
  74. fake_image.already_saved_as = image["name"].rsplit('?', 1)[0]
  75. new_gallery.append(fake_image)
  76. geninfo["infotexts"][gallery_index] = processed.info
  77. return new_gallery, json.dumps(geninfo), plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
  78. def txt2img(id_task: str, request: gr.Request, *args):
  79. p = txt2img_create_processing(id_task, request, *args)
  80. with closing(p):
  81. processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)
  82. if processed is None:
  83. processed = processing.process_images(p)
  84. shared.total_tqdm.clear()
  85. generation_info_js = processed.js()
  86. if opts.samples_log_stdout:
  87. print(generation_info_js)
  88. if opts.do_not_show_images:
  89. processed.images = []
  90. return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")