
Add ability to choose using weighted loss or not

Shondoit, 2 years ago
commit edb10092de
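
For context: earlier commits in this series taught the trainers to read a training image's PNG alpha channel as a per-pixel loss weight via shared.sd_model.weighted_forward; this commit makes that behavior opt-in through a new use_weight flag threaded from the UI down to the dataset and both training loops. A minimal sketch of the idea behind a weighted reconstruction loss (a hypothetical standalone function, not the repository's implementation):

import torch

def weighted_mse(pred: torch.Tensor, target: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    # Per-element squared error, scaled by the weight map before reduction;
    # transparent regions (weight near 0) contribute little to the gradient.
    return (weight * (pred - target) ** 2).mean()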

+ 9 - 4
modules/hypernetworks/hypernetwork.py

@@ -496,7 +496,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
     shared.reload_hypernetworks()
 
 
-def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
     from modules import images
 
@@ -554,7 +554,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
 
     pin_memory = shared.opts.pin_memory
 
-    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)
+    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
 
     if shared.opts.save_training_settings_to_txt:
         saved_params = dict(
@@ -640,14 +640,19 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
                 
                 with devices.autocast():
                     x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
-                    w = batch.weight.to(devices.device, non_blocking=pin_memory)
+                    if use_weight:
+                        w = batch.weight.to(devices.device, non_blocking=pin_memory)
                     if tag_drop_out != 0 or shuffle_tags:
                         shared.sd_model.cond_stage_model.to(devices.device)
                         c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
                         shared.sd_model.cond_stage_model.to(devices.cpu)
                     else:
                         c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
-                    loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step
+                    if use_weight:
+                        loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step
+                        del w
+                    else:
+                        loss = shared.sd_model.forward(x, c)[0] / gradient_step
                     del x
                     del c
 

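Because images without an alpha channel fall back to an all-ones weight map (see dataset.py below), the weighted path should agree with the plain forward path on such images. A self-contained check of that equivalence, under the same weighted-MSE assumption as the sketch above:

import torch

def weighted_mse(pred, target, weight):
    return (weight * (pred - target) ** 2).mean()

pred, target = torch.randn(4, 64, 64), torch.randn(4, 64, 64)
ones = torch.ones_like(pred)  # the fallback map for images without an alpha channel

assert torch.allclose(weighted_mse(pred, target, ones), ((pred - target) ** 2).mean())
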
+ 10 - 5
modules/textual_inversion/dataset.py

@@ -31,7 +31,7 @@ class DatasetEntry:
 
 
 class PersonalizedBase(Dataset):
-    def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False):
+    def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False, use_weight=False):
         re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None
 
         self.placeholder_token = placeholder_token
@@ -64,7 +64,7 @@ class PersonalizedBase(Dataset):
                 image = Image.open(path)
                 #Currently does not work for single color transparency
                 #We would need to read image.info['transparency'] for that
-                if 'A' in image.getbands():
+                if use_weight and 'A' in image.getbands():
                     alpha_channel = image.getchannel('A')
                 image = image.convert('RGB')
                 if not varsize:
@@ -104,7 +104,7 @@ class PersonalizedBase(Dataset):
                     latent_sampling_method = "once"
             latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu)
 
-            if alpha_channel is not None:
+            if use_weight and alpha_channel is not None:
                 channels, *latent_size = latent_sample.shape
                 weight_img = alpha_channel.resize(latent_size)
                 npweight = np.array(weight_img).astype(np.float32)
@@ -113,9 +113,11 @@ class PersonalizedBase(Dataset):
                 #Normalize the weight to a minimum of 0 and a mean of 1, that way the loss will be comparable to default.
                 weight -= weight.min()
                 weight /= weight.mean()
-            else:
+            elif use_weight:
                #If an image does not have an alpha channel, add a ones weight map anyway so we can stack it later
                weight = torch.ones(latent_sample.shape)
+            else:
+                weight = None
             
             if latent_sampling_method == "random":
                 entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight)
@@ -219,7 +221,10 @@ class BatchLoader:
         self.cond_text = [entry.cond_text for entry in data]
         self.cond = [entry.cond for entry in data]
         self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1)
-        self.weight = torch.stack([entry.weight for entry in data]).squeeze(1)
+        if all(entry.weight is not None for entry in data):
+            self.weight = torch.stack([entry.weight for entry in data]).squeeze(1)
+        else:
+            self.weight = None
         #self.emb_index = [entry.emb_index for entry in data]
         #print(self.latent_sample.device)
 

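The alpha-to-weight pipeline above amounts to: resize the alpha channel to the latent resolution, repeat it across the latent channels, then shift and scale it to a minimum of 0 and a mean of 1 so the loss magnitude stays comparable to the unweighted case. A standalone sketch of those steps (assumed shapes; note that Pillow's resize takes a (width, height) tuple while the latent is laid out (channels, height, width)):

import numpy as np
import torch
from PIL import Image

def alpha_to_weight(alpha_channel: Image.Image, latent_sample: torch.Tensor) -> torch.Tensor:
    channels, height, width = latent_sample.shape
    # Pillow sizes are (width, height), so pass the latent's spatial dims reversed.
    npweight = np.array(alpha_channel.resize((width, height))).astype(np.float32)
    # Repeat the single-channel map for every latent channel.
    weight = torch.from_numpy(np.stack([npweight] * channels))
    # Normalize to min 0 / mean 1 (assumes a non-uniform alpha channel,
    # otherwise the mean would be 0 after the shift).
    weight -= weight.min()
    weight /= weight.mean()
    return weight
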
+ 9 - 4
modules/textual_inversion/textual_inversion.py

@@ -351,7 +351,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
         assert log_directory, "Log directory is empty"
 
 
-def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     save_embedding_every = save_embedding_every or 0
     create_image_every = create_image_every or 0
     template_file = textual_inversion_templates.get(template_filename, None)
@@ -410,7 +410,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
 
     pin_memory = shared.opts.pin_memory
 
-    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)
+    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
 
     if shared.opts.save_training_settings_to_txt:
         save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})
@@ -480,7 +480,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
             
                 with devices.autocast():
                     x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
-                    w = batch.weight.to(devices.device, non_blocking=pin_memory)
+                    if use_weight:
+                        w = batch.weight.to(devices.device, non_blocking=pin_memory)
                     c = shared.sd_model.cond_stage_model(batch.cond_text)
 
                     if is_training_inpainting_model:
@@ -491,7 +492,11 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
                     else:
                         cond = c
 
-                    loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step
+                    if use_weight:
+                        loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step
+                        del w
+                    else:
+                        loss = shared.sd_model.forward(x, cond)[0] / gradient_step
                     del x
 
                     _loss_step += loss.item()

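The embedding loop mirrors the hypernetwork change: batch.weight is only moved to the device and fed to weighted_forward when use_weight is set. Together with the BatchLoader change above, this keeps a simple invariant: with use_weight enabled every dataset entry carries at least a ones map, so the stacked batch weight is never None on the weighted path; with it disabled, the None weight is simply never read. A toy stand-in for that collation rule (simplified, not repository code):

import torch

def collate_weights(weights):
    # Stack per-entry weight maps only if every entry actually has one.
    if all(w is not None for w in weights):
        return torch.stack(weights).squeeze(1)
    return None

w = torch.ones(4, 8, 8)  # one entry's map: (latent channels, height, width)
assert collate_weights([w, w]).shape == (2, 4, 8, 8)
assert collate_weights([w, None]) is None
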
+ 4 - 0
modules/ui.py

@@ -1191,6 +1191,8 @@ def create_ui():
                         create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
                         save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
 
+                    use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")
+
                     save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
                     preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
 
@@ -1304,6 +1306,7 @@ def create_ui():
                 shuffle_tags,
                 tag_drop_out,
                 latent_sampling_method,
+                use_weight,
                 create_image_every,
                 save_embedding_every,
                 template_file,
@@ -1337,6 +1340,7 @@ def create_ui():
                 shuffle_tags,
                 tag_drop_out,
                 latent_sampling_method,
+                use_weight,
                 create_image_every,
                 save_embedding_every,
                 template_file,
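
One wiring detail: Gradio passes the components listed in inputs to the callback positionally, so the new checkbox must sit at the same position (right after latent_sampling_method) in both inputs lists and in the train_embedding / train_hypernetwork signatures, which is exactly what the hunks above do. A minimal sketch of the pattern (hypothetical components, not the webui layout):

import gradio as gr

def train_fn(latent_sampling_method, use_weight, create_image_every):
    # Parameter order must match the component order in `inputs` below.
    return f"sampling={latent_sampling_method}, weighted={use_weight}, every={create_image_every}"

with gr.Blocks() as demo:
    sampling = gr.Dropdown(["once", "deterministic", "random"], value="once", label="Latent sampling method")
    use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False)
    every = gr.Number(value=500, precision=0, label="Create image every N steps")
    out = gr.Textbox(label="Result")
    gr.Button("Train").click(fn=train_fn, inputs=[sampling, use_weight, every], outputs=[out])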