
undo some changes from #15823 and fix whitespace

AUTOMATIC1111 1 year ago
parent
commit
99e65ec618
2 changed files with 17 additions and 15 deletions
  1. modules/sd_samplers_kdiffusion.py (+2 -2)
  2. modules/sd_schedulers.py (+15 -13)

+ 2 - 2
modules/sd_samplers_kdiffusion.py

@@ -1,7 +1,7 @@
 import torch
 import inspect
 import k_diffusion.sampling
-from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser, sd_schedulers
+from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser, sd_schedulers, devices
 from modules.sd_samplers_cfg_denoiser import CFGDenoiser  # noqa: F401
 from modules.script_callbacks import ExtraNoiseParams, extra_noise_callback

@@ -115,7 +115,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler):
             if scheduler.need_inner_model:
                 sigmas_kwargs['inner_model'] = self.model_wrap

-            sigmas = scheduler.function(n=steps, **sigmas_kwargs)
+            sigmas = scheduler.function(n=steps, **sigmas_kwargs, device=devices.cpu)

         if discard_next_to_last_sigma:
             sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])

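For context: the only functional change in this file is that the scheduler call now pins the sigma schedule to CPU via `devices.cpu` (in the webui's `modules/devices.py` this is `torch.device("cpu")`), presumably restoring the pre-#15823 behavior of building schedules on CPU. A minimal sketch of the call pattern, with a hypothetical stand-in for `scheduler.function` (not a function from the repo; the sigma bounds are illustrative):

```python
import torch

def example_scheduler(n, sigma_min, sigma_max, device):
    # Hypothetical stand-in for a scheduler.function with the new signature.
    return torch.linspace(sigma_max, sigma_min, n, device=device)

sigmas_kwargs = {'sigma_min': 0.0292, 'sigma_max': 14.6146}  # illustrative values
steps = 20
# Mirrors the diff: keyword expansion followed by an explicit device keyword.
sigmas = example_scheduler(n=steps, **sigmas_kwargs, device=torch.device('cpu'))
assert sigmas.device.type == 'cpu'
```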
+ 15 - 13
modules/sd_schedulers.py

@@ -1,19 +1,19 @@
 import dataclasses
-
 import torch
-
 import k_diffusion
-
 import numpy as np

 from modules import shared

+
 def to_d(x, sigma, denoised):
     """Converts a denoiser output to a Karras ODE derivative."""
     return (x - denoised) / sigma

+
 k_diffusion.sampling.to_d = to_d

+
 @dataclasses.dataclass
 class Scheduler:
     name: str
@@ -25,11 +25,11 @@ class Scheduler:
     aliases: list = None


-def uniform(n, sigma_min, sigma_max, inner_model):
-    return inner_model.get_sigmas(n)
+def uniform(n, sigma_min, sigma_max, inner_model, device):
+    return inner_model.get_sigmas(n).to(device)


-def sgm_uniform(n, sigma_min, sigma_max, inner_model):
+def sgm_uniform(n, sigma_min, sigma_max, inner_model, device):
     start = inner_model.sigma_to_t(torch.tensor(sigma_max))
     end = inner_model.sigma_to_t(torch.tensor(sigma_min))
     sigs = [
@@ -37,9 +37,10 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model):
         for ts in torch.linspace(start, end, n + 1)[:-1]
     ]
     sigs += [0.0]
-    return torch.FloatTensor(sigs)
+    return torch.FloatTensor(sigs).to(device)

-def get_align_your_steps_sigmas(n, sigma_min, sigma_max):
+
+def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device):
     # https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
     def loglinear_interp(t_steps, num_steps):
         """
@@ -65,12 +66,13 @@ def get_align_your_steps_sigmas(n, sigma_min, sigma_max):
     else:
         sigmas.append(0.0)

-    return torch.FloatTensor(sigmas)
+    return torch.FloatTensor(sigmas).to(device)
+

-def kl_optimal(n, sigma_min, sigma_max):
-    alpha_min = torch.arctan(torch.tensor(sigma_min))
-    alpha_max = torch.arctan(torch.tensor(sigma_max))
-    step_indices = torch.arange(n + 1)
+def kl_optimal(n, sigma_min, sigma_max, device):
+    alpha_min = torch.arctan(torch.tensor(sigma_min, device=device))
+    alpha_max = torch.arctan(torch.tensor(sigma_max, device=device))
+    step_indices = torch.arange(n + 1, device=device)
     sigmas = torch.tan(step_indices / n * alpha_min + (1.0 - step_indices / n) * alpha_max)
     return sigmas
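Note that `kl_optimal` is the only scheduler here that constructs its tensors directly on the target device rather than moving them afterwards with `.to(device)`. A standalone sanity check of the updated function (body copied from the diff with an added comment; the sigma bounds below are illustrative, not values from the repo):

```python
import torch

def kl_optimal(n, sigma_min, sigma_max, device):
    # Interpolate linearly in angle space, then map back through tan:
    # sigma_i = tan((i / n) * arctan(sigma_min) + (1 - i / n) * arctan(sigma_max))
    alpha_min = torch.arctan(torch.tensor(sigma_min, device=device))
    alpha_max = torch.arctan(torch.tensor(sigma_max, device=device))
    step_indices = torch.arange(n + 1, device=device)
    sigmas = torch.tan(step_indices / n * alpha_min + (1.0 - step_indices / n) * alpha_max)
    return sigmas

sigmas = kl_optimal(n=10, sigma_min=0.0292, sigma_max=14.6146, device=torch.device('cpu'))
assert sigmas.device.type == 'cpu'
assert torch.all(sigmas[:-1] > sigmas[1:])  # strictly decreasing, ~sigma_max down to ~sigma_min
```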