sd_samplers_common.py

from collections import namedtuple

import numpy as np
import torch
from PIL import Image

from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared
from modules.shared import opts, state

# Describes one sampler entry: display name, constructor callable,
# alternative names it can be selected by, and a dict of misc options.
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])

def setup_img2img_steps(p, steps=None):
    if opts.img2img_fix_steps or steps is not None:
        # Scale the step count up so that, after img2img skips the early
        # steps, the number of steps actually executed matches the request.
        requested_steps = (steps or p.steps)
        steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
        t_enc = requested_steps - 1
    else:
        steps = p.steps
        t_enc = int(min(p.denoising_strength, 0.999) * steps)

    return steps, t_enc
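
# Worked example (hypothetical values): with the fix enabled, a request for
# 20 steps at denoising_strength=0.75 gives steps = int(20 / 0.75) = 26 and
# t_enc = 19; without it, steps stays 20 and t_enc = int(0.75 * 20) = 15,
# so roughly 15 of the 20 scheduled denoising steps actually run.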

# Maps the UI's live-preview mode names to the approximation codes used below.
approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}


def single_sample_to_image(sample, approximation=None):
    if approximation is None:
        approximation = approximation_indexes.get(opts.show_progress_type, 0)

    if approximation == 2:
        # Cheap latent-to-RGB approximation (a fixed linear projection).
        x_sample = sd_vae_approx.cheap_approximation(sample) * 0.5 + 0.5
    elif approximation == 1:
        # Small learned approximation network.
        x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() * 0.5 + 0.5
    elif approximation == 3:
        # TAESD tiny autoencoder; its output is already in [0, 1].
        x_sample = sample * 1.5
        x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
    else:
        # Full VAE decode; slowest but most accurate.
        x_sample = decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5

    x_sample = torch.clamp(x_sample, min=0.0, max=1.0)
    x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
    x_sample = x_sample.astype(np.uint8)

    return Image.fromarray(x_sample)

def decode_first_stage(model, x):
    # Decode latents with the model's VAE, cast to the VAE's working dtype.
    x = model.decode_first_stage(x.to(devices.dtype_vae))

    return x


def sample_to_image(samples, index=0, approximation=None):
    return single_sample_to_image(samples[index], approximation)


def samples_to_image_grid(samples, approximation=None):
    return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
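
# Usage sketch (hypothetical; assumes a loaded model and `samples` holding a
# batch of latents of shape [N, 4, H // 8, W // 8]):
#     preview = sample_to_image(samples, index=0,
#                               approximation=approximation_indexes["TAESD"])
#     grid = samples_to_image_grid(samples)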

def store_latent(decoded):
    state.current_latent = decoded

    # Refresh the live preview image at the configured step interval.
    if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
        if not shared.parallel_processing_allowed:
            shared.state.assign_current_image(sample_to_image(decoded))

def is_sampler_using_eta_noise_seed_delta(p):
    """returns whether the sampler from config will use eta noise seed delta for image creation"""

    sampler_config = sd_samplers.find_sampler_config(p.sampler_name)

    eta = p.eta

    if eta is None and p.sampler is not None:
        eta = p.sampler.eta

    if eta is None and sampler_config is not None:
        eta = 0 if sampler_config.options.get("default_eta_is_0", False) else 1.0

    # With eta == 0 no extra noise is injected, so ENSD has no effect.
    if eta == 0:
        return False

    return sampler_config.options.get("uses_ensd", False)

class InterruptedException(BaseException):
    # Derives from BaseException so a user interrupt is not swallowed by
    # generic `except Exception` handlers in the sampling code.
    pass

def replace_torchsde_brownian():
    import torchsde._brownian.brownian_interval

    def torchsde_randn(size, dtype, device, seed):
        # Route torchsde's seeded noise through devices.randn_local so that
        # SDE samplers draw the same noise regardless of device.
        return devices.randn_local(seed, size).to(device=device, dtype=dtype)

    # Monkey-patch the module-level noise source used by BrownianInterval.
    torchsde._brownian.brownian_interval._randn = torchsde_randn


replace_torchsde_brownian()
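
# Effect sketch (hypothetical check, assuming torchsde's BrownianInterval
# API; not part of the original module): two Brownian intervals built with
# the same entropy should now yield identical increments, making SDE
# samplers reproducible across runs:
#     import torchsde
#     bi1 = torchsde.BrownianInterval(t0=0.0, t1=1.0, size=(1, 1), entropy=42)
#     bi2 = torchsde.BrownianInterval(t0=0.0, t1=1.0, size=(1, 1), entropy=42)
#     assert torch.equal(bi1(0.0, 0.5), bi2(0.0, 0.5))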