@@ -10,8 +10,7 @@ import torch.hub
 from torchvision import transforms
 from torchvision.transforms.functional import InterpolationMode
 
-from modules import devices, paths, shared, lowvram, modelloader, errors
-from modules.torch_utils import get_param
+from modules import devices, paths, shared, lowvram, modelloader, errors, torch_utils
 
 blip_image_eval_size = 384
 clip_model_name = 'ViT-L/14'
@@ -132,7 +131,7 @@ class InterrogateModels:
 
         self.clip_model = self.clip_model.to(devices.device_interrogate)
 
-        self.dtype = get_param(self.clip_model).dtype
+        self.dtype = torch_utils.get_param(self.clip_model).dtype
 
     def send_clip_to_ram(self):
         if not shared.opts.interrogate_keep_models_in_memory: