devices.py

import sys
import contextlib
from functools import lru_cache

import torch
from modules import errors, shared

if sys.platform == "darwin":
    from modules import mac_specific


def has_mps() -> bool:
    if sys.platform != "darwin":
        return False
    else:
        return mac_specific.has_mps


def get_cuda_device_string():
    if shared.cmd_opts.device_id is not None:
        return f"cuda:{shared.cmd_opts.device_id}"

    return "cuda"
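
# Note on get_cuda_device_string(): shared.cmd_opts.device_id is assumed to be
# populated from the --device-id command-line option, so e.g. `--device-id 1`
# yields "cuda:1"; otherwise the plain "cuda" device string is returned.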


def get_optimal_device_name():
    if torch.cuda.is_available():
        return get_cuda_device_string()

    if has_mps():
        return "mps"

    return "cpu"


def get_optimal_device():
    return torch.device(get_optimal_device_name())
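
# Minimal usage sketch for get_optimal_device(): selection order is CUDA, then
# Apple MPS, then CPU fallback.
#
#     dev = get_optimal_device()          # e.g. torch.device("cuda")
#     x = torch.zeros(4, device=dev)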


def get_device_for(task):
    if task in shared.cmd_opts.use_cpu:
        return cpu

    return get_optimal_device()
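
# Sketch for get_device_for(): shared.cmd_opts.use_cpu is assumed to hold the
# task names passed to the --use-cpu option, so launching with
# `--use-cpu interrogate` makes get_device_for("interrogate") return the CPU
# device while other tasks keep the optimal device.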


def torch_gc():
    if torch.cuda.is_available():
        with torch.cuda.device(get_cuda_device_string()):
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()

    if has_mps():
        mac_specific.torch_mps_gc()
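
# Usage sketch for torch_gc(): call it after dropping references to large
# models or tensors to return cached VRAM to the driver, e.g.
#
#     del model
#     torch_gc()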


def enable_tf32():
    if torch.cuda.is_available():

        # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
        # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
        if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
            torch.backends.cudnn.benchmark = True

        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True


errors.run(enable_tf32, "Enabling TF32")
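
# Note: TF32 trades a little matmul precision for speed on Ampere-and-newer
# GPUs; compute capability (7, 5) identifies Turing cards (GTX 16xx / RTX
# 20xx). errors.run is assumed to be a small wrapper from modules.errors that
# logs exceptions instead of letting them abort module import.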

cpu: torch.device = torch.device("cpu")
device: torch.device = None
device_interrogate: torch.device = None
device_gfpgan: torch.device = None
device_esrgan: torch.device = None
device_codeformer: torch.device = None
dtype: torch.dtype = torch.float16
dtype_vae: torch.dtype = torch.float16
dtype_unet: torch.dtype = torch.float16
unet_needs_upcast = False
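
# These module-level globals start as placeholders; initialization code
# elsewhere in the codebase is expected to assign the actual devices and dtypes
# at startup, based on command-line options such as --no-half and --no-half-vae.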


def cond_cast_unet(input):
    return input.to(dtype_unet) if unet_needs_upcast else input


def cond_cast_float(input):
    return input.float() if unet_needs_upcast else input
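
# cond_cast_unet/cond_cast_float conditionally hop between the UNet dtype and
# float32: they are no-ops unless unet_needs_upcast has been set (presumably
# when upcast sampling is in effect), keeping casts out of the hot path
# otherwise.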

nv_rng = None


def autocast(disable=False):
    if disable:
        return contextlib.nullcontext()

    if dtype == torch.float32 or shared.cmd_opts.precision == "full":
        return contextlib.nullcontext()

    return torch.autocast("cuda")
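
# Usage sketch for autocast(): wrap inference so mixed-precision kernels are
# used only when the configured dtype calls for them.
#
#     with autocast():
#         samples = model(x)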


def without_autocast(disable=False):
    return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext()
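
# without_autocast() is the inverse: inside an active autocast region it opens
# a context with autocast disabled, for operations that must run at full
# precision.
#
#     with without_autocast():
#         stats = x.float().std()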


class NansException(Exception):
    pass


def test_for_nans(x, where):
    if shared.cmd_opts.disable_nan_check:
        return

    if not torch.all(torch.isnan(x)).item():
        return

    if where == "unet":
        message = "A tensor with all NaNs was produced in Unet."

        if not shared.cmd_opts.no_half:
            message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this."

    elif where == "vae":
        message = "A tensor with all NaNs was produced in VAE."

        if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae:
            message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this."
    else:
        message = "A tensor with all NaNs was produced."

    message += " Use --disable-nan-check commandline argument to disable this check."

    raise NansException(message)
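
# Usage sketch for test_for_nans(): callers pass the tensor plus a short
# location tag so the error message can point at the failing stage, e.g.
#
#     test_for_nans(samples, "unet")
#     test_for_nans(decoded, "vae")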


@lru_cache
def first_time_calculation():
    """
    just do any calculation with pytorch layers - the first time this is done it allocates about 700MB of memory and
    spends about 2.7 seconds doing that, at least with NVidia.
    """

    x = torch.zeros((1, 1)).to(device, dtype)
    linear = torch.nn.Linear(1, 1).to(device, dtype)
    linear(x)

    x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
    conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
    conv2d(x)
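
# Usage note: thanks to @lru_cache, the dummy linear/conv pass above runs only
# once per process; startup code can call first_time_calculation() early to pay
# the one-time allocation cost before the first real generation.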