瀏覽代碼

Merge pull request #11046 from akx/ded-code

Remove a bunch of unused/vestigial code
AUTOMATIC1111 2 年之前
父節點
當前提交
1bf01b73f4

+ 0 - 7
modules/api/api.py

@@ -32,13 +32,6 @@ import piexif
 import piexif.helper
 
 
 
 
-def upscaler_to_index(name: str):
-    try:
-        return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
-    except Exception as e:
-        raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}") from e
-
-
 def script_name_to_index(name, scripts):
     try:
         return [script.title().lower() for script in scripts].index(name.lower())

+ 0 - 4
modules/api/models.py

@@ -274,10 +274,6 @@ class PromptStyleItem(BaseModel):
     prompt: Optional[str] = Field(title="Prompt")
     negative_prompt: Optional[str] = Field(title="Negative Prompt")
 
 
-class ArtistItem(BaseModel):
-    name: str = Field(title="Name")
-    score: float = Field(title="Score")
-    category: str = Field(title="Category")
 
 
 class EmbeddingItem(BaseModel):
     step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available")

+ 0 - 4
modules/codeformer_model.py

@@ -15,7 +15,6 @@ model_dir = "Codeformer"
 model_path = os.path.join(models_path, model_dir)
 model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
 
 
-have_codeformer = False
 codeformer = None
 
 
 
 
@@ -123,9 +122,6 @@ def setup_model(dirname):
 
 
                 return restored_img
 
 
-        global have_codeformer
-        have_codeformer = True
-
         global codeformer
         codeformer = FaceRestorerCodeFormer(dirname)
         shared.face_restorers.append(codeformer)

+ 0 - 7
modules/devices.py

@@ -15,13 +15,6 @@ def has_mps() -> bool:
     else:
         return mac_specific.has_mps
 
 
-def extract_device_id(args, name):
-    for x in range(len(args)):
-        if name in args[x]:
-            return args[x + 1]
-
-    return None
-
 
 
 def get_cuda_device_string():
     from modules import shared

+ 0 - 29
modules/generation_parameters_copypaste.py

@@ -174,31 +174,6 @@ def send_image_and_dimensions(x):
     return img, w, h
 
 
 
 
-
-def find_hypernetwork_key(hypernet_name, hypernet_hash=None):
-    """Determines the config parameter name to use for the hypernet based on the parameters in the infotext.
-
-    Example: an infotext provides "Hypernet: ke-ta" and "Hypernet hash: 1234abcd". For the "Hypernet" config
-    parameter this means there should be an entry that looks like "ke-ta-10000(1234abcd)" to set it to.
-
-    If the infotext has no hash, then a hypernet with the same name will be selected instead.
-    """
-    hypernet_name = hypernet_name.lower()
-    if hypernet_hash is not None:
-        # Try to match the hash in the name
-        for hypernet_key in shared.hypernetworks.keys():
-            result = re_hypernet_hash.search(hypernet_key)
-            if result is not None and result[1] == hypernet_hash:
-                return hypernet_key
-    else:
-        # Fall back to a hypernet with the same name
-        for hypernet_key in shared.hypernetworks.keys():
-            if hypernet_key.lower().startswith(hypernet_name):
-                return hypernet_key
-
-    return None
-
-
 def restore_old_hires_fix_params(res):
     """for infotexts that specify old First pass size parameter, convert it into
     width, height, and hr scale"""
@@ -332,10 +307,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
     return res
 
 
 
 
-settings_map = {}
-
-
-
 infotext_to_setting_name_mapping = [
     ('Clip skip', 'CLIP_stop_at_last_layers', ),
     ('Conditional mask weight', 'inpainting_mask_weight'),

+ 0 - 24
modules/hypernetworks/hypernetwork.py

@@ -353,17 +353,6 @@ def load_hypernetworks(names, multipliers=None):
         shared.loaded_hypernetworks.append(hypernetwork)
 
 
 
 
-def find_closest_hypernetwork_name(search: str):
-    if not search:
-        return None
-    search = search.lower()
-    applicable = [name for name in shared.hypernetworks if search in name.lower()]
-    if not applicable:
-        return None
-    applicable = sorted(applicable, key=lambda name: len(name))
-    return applicable[0]
-
-
 def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None):
     hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None)
 
 
@@ -446,18 +435,6 @@ def statistics(data):
     return total_information, recent_information
 
 
 
 
-def report_statistics(loss_info:dict):
-    keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
-    for key in keys:
-        try:
-            print("Loss statistics for file " + key)
-            info, recent = statistics(list(loss_info[key]))
-            print(info)
-            print(recent)
-        except Exception as e:
-            print(e)
-
-
 def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
     # Remove illegal characters from name.
     name = "".join( x for x in name if (x.isalnum() or x in "._- "))
@@ -770,7 +747,6 @@ Last saved image: {html.escape(last_saved_image)}<br/>
         pbar.leave = False
         pbar.close()
         hypernetwork.eval()
-        #report_statistics(loss_dict)
         sd_hijack_checkpoint.remove()
 
 
 
 

+ 0 - 14
modules/paths.py

@@ -38,17 +38,3 @@ for d, must_exist, what, options in path_dirs:
         else:
             sys.path.append(d)
         paths[what] = d
-
-
-class Prioritize:
-    def __init__(self, name):
-        self.name = name
-        self.path = None
-
-    def __enter__(self):
-        self.path = sys.path.copy()
-        sys.path = [paths[self.name]] + sys.path
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        sys.path = self.path
-        self.path = None