@@ -181,29 +181,38 @@ class EmbeddingDatabase:
         else:
             return
 
+
         # textual inversion embeddings
         if 'string_to_param' in data:
             param_dict = data['string_to_param']
             param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11
             assert len(param_dict) == 1, 'embedding file has multiple terms in it'
             emb = next(iter(param_dict.items()))[1]
-        # diffuser concepts
-        elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
+            vec = emb.detach().to(devices.device, dtype=torch.float32)
+            shape = vec.shape[-1]
+            vectors = vec.shape[0]
+        elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding
+            vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()}
+            shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1]
+            vectors = data['clip_g'].shape[0]
+        elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts
             assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
 
             emb = next(iter(data.values()))
             if len(emb.shape) == 1:
                 emb = emb.unsqueeze(0)
+            vec = emb.detach().to(devices.device, dtype=torch.float32)
+            shape = vec.shape[-1]
+            vectors = vec.shape[0]
         else:
             raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
 
-        vec = emb.detach().to(devices.device, dtype=torch.float32)
         embedding = Embedding(vec, name)
         embedding.step = data.get('step', None)
         embedding.sd_checkpoint = data.get('sd_checkpoint', None)
         embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
-        embedding.vectors = vec.shape[0]
-        embedding.shape = vec.shape[-1]
+        embedding.vectors = vectors
+        embedding.shape = shape
         embedding.filename = path
         embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '')
 
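For context, a minimal sketch of how the new branches derive `vectors` and `shape` for the two file layouts. The tensor widths (768/1280) and the 2-token count below are assumed example values, not something the diff prescribes:

# Sketch only: illustrates the vectors/shape bookkeeping added above, not the actual loader.
import torch

# SD1-style textual inversion file: {'string_to_param': {'*': tensor[num_vectors, dim]}}
sd1_data = {'string_to_param': {'*': torch.zeros(2, 768)}}
emb = next(iter(sd1_data['string_to_param'].values()))
print(emb.shape[0], emb.shape[-1])  # vectors=2, shape=768

# SDXL-style file: one tensor per text encoder, same token count for both.
sdxl_data = {'clip_l': torch.zeros(2, 768), 'clip_g': torch.zeros(2, 1280)}
vectors = sdxl_data['clip_g'].shape[0]                                 # 2
shape = sdxl_data['clip_g'].shape[-1] + sdxl_data['clip_l'].shape[-1]  # 768 + 1280 = 2048
print(vectors, shape)

In the SDXL branch `vec` stays a dict keyed by encoder name rather than a single tensor, which is why the final `embedding.vectors` / `embedding.shape` assignments now read from the per-branch `vectors` and `shape` variables instead of `vec.shape`.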