rename print_error to report, use it together with package name

AUTOMATIC 2 years ago
Parent commit 05933840f0
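
The change is mechanical across the codebase: every call site drops the direct "from modules.errors import print_error" import, imports the errors module instead, and calls errors.report so the package name is visible at the call site. A minimal before/after sketch of the pattern (the message text here is illustrative, not taken from any one file):

    # before this commit
    from modules.errors import print_error
    print_error("Error loading model", exc_info=True)

    # after this commit
    from modules import errors
    errors.report("Error loading model", exc_info=True)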

+ 2 - 3
extensions-builtin/LDSR/scripts/ldsr_model.py

@@ -2,10 +2,9 @@ import os
 
 from basicsr.utils.download_util import load_file_from_url
 
-from modules.errors import print_error
 from modules.upscaler import Upscaler, UpscalerData
 from ldsr_model_arch import LDSR
-from modules import shared, script_callbacks
+from modules import shared, script_callbacks, errors
 import sd_hijack_autoencoder  # noqa: F401
 import sd_hijack_ddpm_v1  # noqa: F401
 
@@ -51,7 +50,7 @@ class UpscalerLDSR(Upscaler):
         try:
             return LDSR(model, yaml)
         except Exception:
-            print_error("Error importing LDSR", exc_info=True)
+            errors.report("Error importing LDSR", exc_info=True)
         return None
 
     def do_upscale(self, img, path):

+ 2 - 3
extensions-builtin/ScuNET/scripts/scunet_model.py

@@ -9,10 +9,9 @@ from tqdm import tqdm
 from basicsr.utils.download_util import load_file_from_url
 
 import modules.upscaler
-from modules import devices, modelloader, script_callbacks
+from modules import devices, modelloader, script_callbacks, errors
 from scunet_model_arch import SCUNet as net
 
-from modules.errors import print_error
 from modules.shared import opts
 
 
@@ -39,7 +38,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
                 scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
                 scalers.append(scaler_data)
             except Exception:
-                print_error(f"Error loading ScuNET model: {file}", exc_info=True)
+                errors.report(f"Error loading ScuNET model: {file}", exc_info=True)
         if add_model2:
             scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
             scalers.append(scaler_data2)

+ 2 - 3
modules/api/api.py

@@ -14,9 +14,8 @@ from fastapi.encoders import jsonable_encoder
 from secrets import compare_digest
 
 import modules.shared as shared
-from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
+from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors
 from modules.api import models
-from modules.errors import print_error
 from modules.shared import opts
 from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
 from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
@@ -145,7 +144,7 @@ def api_middleware(app: FastAPI):
                 print(message)
                 console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
             else:
-                print_error(message, exc_info=True)
+                errors.report(message, exc_info=True)
         return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))
 
     @app.middleware("http")

+ 2 - 3
modules/call_queue.py

@@ -2,8 +2,7 @@ import html
 import threading
 import time
 
-from modules import shared, progress
-from modules.errors import print_error
+from modules import shared, progress, errors
 
 queue_lock = threading.Lock()
 
@@ -62,7 +61,7 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
             arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
             if len(arg_str) > max_debug_str_len:
                 arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
-            print_error(f"{message}\n{arg_str}", exc_info=True)
+            errors.report(f"{message}\n{arg_str}", exc_info=True)
 
             shared.state.job = ""
             shared.state.job_count = 0

+ 3 - 4
modules/codeformer_model.py

@@ -5,8 +5,7 @@ import torch
 
 import modules.face_restoration
 import modules.shared
-from modules import shared, devices, modelloader
-from modules.errors import print_error
+from modules import shared, devices, modelloader, errors
 from modules.paths import models_path
 
 # codeformer people made a choice to include modified basicsr library to their project which makes
@@ -105,7 +104,7 @@ def setup_model(dirname):
                         del output
                         torch.cuda.empty_cache()
                     except Exception:
-                        print_error('Failed inference for CodeFormer', exc_info=True)
+                        errors.report('Failed inference for CodeFormer', exc_info=True)
                         restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
 
                     restored_face = restored_face.astype('uint8')
@@ -134,6 +133,6 @@ def setup_model(dirname):
         shared.face_restorers.append(codeformer)
 
     except Exception:
-        print_error("Error setting up CodeFormer", exc_info=True)
+        errors.report("Error setting up CodeFormer", exc_info=True)
 
    # sys.path = stored_sys_path

+ 4 - 5
modules/config_states.py

@@ -11,8 +11,7 @@ from datetime import datetime
 from collections import OrderedDict
 import git
 
-from modules import shared, extensions
-from modules.errors import print_error
+from modules import shared, extensions, errors
 from modules.paths_internal import script_path, config_states_dir
 
 
@@ -52,7 +51,7 @@ def get_webui_config():
         if os.path.exists(os.path.join(script_path, ".git")):
             webui_repo = git.Repo(script_path)
     except Exception:
-        print_error(f"Error reading webui git info from {script_path}", exc_info=True)
+        errors.report(f"Error reading webui git info from {script_path}", exc_info=True)
 
     webui_remote = None
     webui_commit_hash = None
@@ -132,7 +131,7 @@ def restore_webui_config(config):
         if os.path.exists(os.path.join(script_path, ".git")):
             webui_repo = git.Repo(script_path)
     except Exception:
-        print_error(f"Error reading webui git info from {script_path}", exc_info=True)
+        errors.report(f"Error reading webui git info from {script_path}", exc_info=True)
         return
 
     try:
@@ -140,7 +139,7 @@ def restore_webui_config(config):
         webui_repo.git.reset(webui_commit_hash, hard=True)
         print(f"* Restored webui to commit {webui_commit_hash}.")
     except Exception:
-        print_error(f"Error restoring webui to commit{webui_commit_hash}")
+        errors.report(f"Error restoring webui to commit{webui_commit_hash}")
 
 
 def restore_extension_config(config):

+ 2 - 6
modules/errors.py

@@ -3,11 +3,7 @@ import textwrap
 import traceback
 
 
-def print_error(
-    message: str,
-    *,
-    exc_info: bool = False,
-) -> None:
+def report(message: str, *, exc_info: bool = False) -> None:
     """
     Print an error message to stderr, with optional traceback.
     """
@@ -15,7 +11,7 @@ def print_error(
         print("***", line, file=sys.stderr)
     if exc_info:
         print(textwrap.indent(traceback.format_exc(), "    "), file=sys.stderr)
-        print("---")
+        print("---", file=sys.stderr)
 
 
 def print_error_explanation(message):
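
With this hunk, report becomes the single entry point in modules/errors.py, and the "---" footer now goes to stderr together with the rest of the output instead of stdout. A minimal sketch of what a caller sees (the failure here is deliberately synthetic):

    from modules import errors  # assumes the webui package layout

    try:
        raise ValueError("synthetic failure")
    except Exception:
        # writes "*** Could not complete the operation" to stderr,
        # then the indented traceback followed by a "---" separator
        errors.report("Could not complete the operation", exc_info=True)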

+ 3 - 4
modules/extensions.py

@@ -1,8 +1,7 @@
 import os
 import threading
 
-from modules import shared
-from modules.errors import print_error
+from modules import shared, errors
 from modules.gitpython_hack import Repo
 from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path  # noqa: F401
 
@@ -54,7 +53,7 @@ class Extension:
             if os.path.exists(os.path.join(self.path, ".git")):
                 repo = Repo(self.path)
         except Exception:
-            print_error(f"Error reading github repository info from {self.path}", exc_info=True)
+            errors.report(f"Error reading github repository info from {self.path}", exc_info=True)
 
         if repo is None or repo.bare:
             self.remote = None
@@ -70,7 +69,7 @@ class Extension:
                 self.version = self.commit_hash[:8]
 
             except Exception:
-                print_error(f"Failed reading extension data from Git repository ({self.name})", exc_info=True)
+                errors.report(f"Failed reading extension data from Git repository ({self.name})", exc_info=True)
                 self.remote = None
 
         self.have_info_from_repo = True

+ 2 - 3
modules/gfpgan_model.py

@@ -4,8 +4,7 @@ import facexlib
 import gfpgan
 
 import modules.face_restoration
-from modules import paths, shared, devices, modelloader
-from modules.errors import print_error
+from modules import paths, shared, devices, modelloader, errors
 
 model_dir = "GFPGAN"
 user_path = None
@@ -111,4 +110,4 @@ def setup_model(dirname):
 
         shared.face_restorers.append(FaceRestorerGFPGAN())
     except Exception:
-        print_error("Error setting up GFPGAN", exc_info=True)
+        errors.report("Error setting up GFPGAN", exc_info=True)

+ 3 - 4
modules/hypernetworks/hypernetwork.py

@@ -9,8 +9,7 @@ import torch
 import tqdm
 from einops import rearrange, repeat
 from ldm.util import default
-from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint
-from modules.errors import print_error
+from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors
 from modules.textual_inversion import textual_inversion, logging
 from modules.textual_inversion.learn_schedule import LearnRateScheduler
 from torch import einsum
@@ -329,7 +328,7 @@ def load_hypernetwork(name):
         hypernetwork.load(path)
         return hypernetwork
     except Exception:
-        print_error(f"Error loading hypernetwork {path}", exc_info=True)
+        errors.report(f"Error loading hypernetwork {path}", exc_info=True)
         return None
 
 
@@ -766,7 +765,7 @@ Last saved image: {html.escape(last_saved_image)}<br/>
 </p>
 """
     except Exception:
-        print_error("Exception in training hypernetwork", exc_info=True)
+        errors.report("Exception in training hypernetwork", exc_info=True)
     finally:
         pbar.leave = False
         pbar.close()

+ 2 - 3
modules/images.py

@@ -16,7 +16,6 @@ import json
 import hashlib
 
 from modules import sd_samplers, shared, script_callbacks, errors
-from modules.errors import print_error
 from modules.paths_internal import roboto_ttf_file
 from modules.shared import opts
 
@@ -463,7 +462,7 @@ class FilenameGenerator:
                     replacement = fun(self, *pattern_args)
                 except Exception:
                     replacement = None
-                    print_error(f"Error adding [{pattern}] to filename", exc_info=True)
+                    errors.report(f"Error adding [{pattern}] to filename", exc_info=True)
 
                 if replacement == NOTHING_AND_SKIP_PREVIOUS_TEXT:
                     continue
@@ -698,7 +697,7 @@ def read_info_from_image(image):
 Negative prompt: {json_info["uc"]}
 Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
         except Exception:
-            print_error("Error parsing NovelAI image generation parameters", exc_info=True)
+            errors.report("Error parsing NovelAI image generation parameters", exc_info=True)
 
     return geninfo, items
 

+ 1 - 2
modules/interrogate.py

@@ -11,7 +11,6 @@ from torchvision import transforms
 from torchvision.transforms.functional import InterpolationMode
 
 from modules import devices, paths, shared, lowvram, modelloader, errors
-from modules.errors import print_error
 
 blip_image_eval_size = 384
 clip_model_name = 'ViT-L/14'
@@ -216,7 +215,7 @@ class InterrogateModels:
                             res += f", {match}"
 
         except Exception:
-            print_error("Error interrogating", exc_info=True)
+            errors.report("Error interrogating", exc_info=True)
             res += "<error>"
 
         self.unload()

+ 3 - 4
modules/launch_utils.py

@@ -7,8 +7,7 @@ import platform
 import json
 from functools import lru_cache
 
-from modules import cmd_args
-from modules.errors import print_error
+from modules import cmd_args, errors
 from modules.paths_internal import script_path, extensions_dir
 
 args, _ = cmd_args.parser.parse_known_args()
@@ -189,7 +188,7 @@ def run_extension_installer(extension_dir):
 
         print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
     except Exception as e:
-        print_error(str(e))
+        errors.report(str(e))
 
 
 def list_extensions(settings_file):
@@ -200,7 +199,7 @@ def list_extensions(settings_file):
             with open(settings_file, "r", encoding="utf8") as file:
                 settings = json.load(file)
     except Exception:
-        print_error("Could not load settings", exc_info=True)
+        errors.report("Could not load settings", exc_info=True)
 
     disabled_extensions = set(settings.get('disabled_extensions', []))
     disable_all_extensions = settings.get('disable_all_extensions', 'none')

+ 2 - 2
modules/localization.py

@@ -1,7 +1,7 @@
 import json
 import os
 
-from modules.errors import print_error
+from modules import errors
 
 localizations = {}
 
@@ -30,6 +30,6 @@ def localization_js(current_localization_name: str) -> str:
             with open(fn, "r", encoding="utf8") as file:
                 data = json.load(file)
         except Exception:
-            print_error(f"Error loading localization from {fn}", exc_info=True)
+            errors.report(f"Error loading localization from {fn}", exc_info=True)
 
     return f"window.localization = {json.dumps(data)}"

+ 5 - 5
modules/realesrgan_model.py

@@ -5,10 +5,10 @@ from PIL import Image
 from basicsr.utils.download_util import load_file_from_url
 from realesrgan import RealESRGANer
 
-from modules.errors import print_error
 from modules.upscaler import Upscaler, UpscalerData
 from modules.shared import cmd_opts, opts
-from modules import modelloader
+from modules import modelloader, errors
+
 
 class UpscalerRealESRGAN(Upscaler):
     def __init__(self, path):
@@ -35,7 +35,7 @@ class UpscalerRealESRGAN(Upscaler):
                     self.scalers.append(scaler)
 
         except Exception:
-            print_error("Error importing Real-ESRGAN", exc_info=True)
+            errors.report("Error importing Real-ESRGAN", exc_info=True)
             self.enable = False
             self.scalers = []
 
@@ -75,7 +75,7 @@ class UpscalerRealESRGAN(Upscaler):
 
             return info
         except Exception:
-            print_error("Error making Real-ESRGAN models list", exc_info=True)
+            errors.report("Error making Real-ESRGAN models list", exc_info=True)
         return None
 
     def load_models(self, _):
@@ -132,4 +132,4 @@ def get_realesrgan_models(scaler):
         ]
         return models
     except Exception:
-        print_error("Error making Real-ESRGAN models list", exc_info=True)
+        errors.report("Error making Real-ESRGAN models list", exc_info=True)

+ 4 - 3
modules/safe.py

@@ -9,9 +9,10 @@ import _codecs
 import zipfile
 import re
 
-from modules.errors import print_error
 
 # PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage
+from modules import errors
+
 TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage
 
 def encode(*args):
@@ -136,7 +137,7 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs):
             check_pt(filename, extra_handler)
 
     except pickle.UnpicklingError:
-        print_error(
+        errors.report(
             f"Error verifying pickled file from {filename}\n"
             "-----> !!!! The file is most likely corrupted !!!! <-----\n"
             "You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n",
@@ -144,7 +145,7 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs):
         )
         return None
     except Exception:
-        print_error(
+        errors.report(
             f"Error verifying pickled file from {filename}\n"
             f"The file may be malicious, so the program is not going to read it.\n"
             f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n",

+ 2 - 2
modules/script_callbacks.py

@@ -5,11 +5,11 @@ from typing import Optional, Dict, Any
 from fastapi import FastAPI
 from gradio import Blocks
 
-from modules.errors import print_error
+from modules import errors
 
 
 def report_exception(c, job):
-    print_error(f"Error executing callback {job} for {c.script}", exc_info=True)
+    errors.report(f"Error executing callback {job} for {c.script}", exc_info=True)
 
 
 class ImageSaveParams:

+ 2 - 2
modules/script_loading.py

@@ -1,7 +1,7 @@
 import os
 import importlib.util
 
-from modules.errors import print_error
+from modules import errors
 
 
 def load_module(path):
@@ -27,4 +27,4 @@ def preload_extensions(extensions_dir, parser):
                 module.preload(parser)
 
         except Exception:
-            print_error(f"Error running preload() for {preload_script}", exc_info=True)
+            errors.report(f"Error running preload() for {preload_script}", exc_info=True)

+ 11 - 12
modules/scripts.py

@@ -5,8 +5,7 @@ from collections import namedtuple
 
 import gradio as gr
 
-from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing
-from modules.errors import print_error
+from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing, errors
 
 AlwaysVisible = object()
 
@@ -264,7 +263,7 @@ def load_scripts():
             register_scripts_from_module(script_module)
 
         except Exception:
-            print_error(f"Error loading script: {scriptfile.filename}", exc_info=True)
+            errors.report(f"Error loading script: {scriptfile.filename}", exc_info=True)
 
         finally:
             sys.path = syspath
@@ -281,7 +280,7 @@ def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
     try:
         return func(*args, **kwargs)
     except Exception:
-        print_error(f"Error calling: {filename}/{funcname}", exc_info=True)
+        errors.report(f"Error calling: {filename}/{funcname}", exc_info=True)
 
     return default
 
@@ -447,7 +446,7 @@ class ScriptRunner:
                 script_args = p.script_args[script.args_from:script.args_to]
                 script.process(p, *script_args)
             except Exception:
-                print_error(f"Error running process: {script.filename}", exc_info=True)
+                errors.report(f"Error running process: {script.filename}", exc_info=True)
 
     def before_process_batch(self, p, **kwargs):
         for script in self.alwayson_scripts:
@@ -455,7 +454,7 @@ class ScriptRunner:
                 script_args = p.script_args[script.args_from:script.args_to]
                 script.before_process_batch(p, *script_args, **kwargs)
             except Exception:
-                print_error(f"Error running before_process_batch: {script.filename}", exc_info=True)
+                errors.report(f"Error running before_process_batch: {script.filename}", exc_info=True)
 
     def process_batch(self, p, **kwargs):
         for script in self.alwayson_scripts:
@@ -463,7 +462,7 @@ class ScriptRunner:
                 script_args = p.script_args[script.args_from:script.args_to]
                 script.process_batch(p, *script_args, **kwargs)
             except Exception:
-                print_error(f"Error running process_batch: {script.filename}", exc_info=True)
+                errors.report(f"Error running process_batch: {script.filename}", exc_info=True)
 
     def postprocess(self, p, processed):
         for script in self.alwayson_scripts:
@@ -471,7 +470,7 @@ class ScriptRunner:
                 script_args = p.script_args[script.args_from:script.args_to]
                 script.postprocess(p, processed, *script_args)
             except Exception:
-                print_error(f"Error running postprocess: {script.filename}", exc_info=True)
+                errors.report(f"Error running postprocess: {script.filename}", exc_info=True)
 
     def postprocess_batch(self, p, images, **kwargs):
         for script in self.alwayson_scripts:
@@ -479,7 +478,7 @@ class ScriptRunner:
                 script_args = p.script_args[script.args_from:script.args_to]
                 script.postprocess_batch(p, *script_args, images=images, **kwargs)
             except Exception:
-                print_error(f"Error running postprocess_batch: {script.filename}", exc_info=True)
+                errors.report(f"Error running postprocess_batch: {script.filename}", exc_info=True)
 
     def postprocess_image(self, p, pp: PostprocessImageArgs):
         for script in self.alwayson_scripts:
@@ -487,21 +486,21 @@ class ScriptRunner:
                 script_args = p.script_args[script.args_from:script.args_to]
                 script.postprocess_image(p, pp, *script_args)
             except Exception:
-                print_error(f"Error running postprocess_image: {script.filename}", exc_info=True)
+                errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True)
 
     def before_component(self, component, **kwargs):
         for script in self.scripts:
             try:
                 script.before_component(component, **kwargs)
             except Exception:
-                print_error(f"Error running before_component: {script.filename}", exc_info=True)
+                errors.report(f"Error running before_component: {script.filename}", exc_info=True)
 
     def after_component(self, component, **kwargs):
         for script in self.scripts:
             try:
                 script.after_component(component, **kwargs)
             except Exception:
-                print_error(f"Error running after_component: {script.filename}", exc_info=True)
+                errors.report(f"Error running after_component: {script.filename}", exc_info=True)
 
     def reload_sources(self, cache):
         for si, script in list(enumerate(self.scripts)):

+ 1 - 2
modules/sd_hijack_optimizations.py

@@ -9,7 +9,6 @@ from ldm.util import default
 from einops import rearrange
 
 from modules import shared, errors, devices, sub_quadratic_attention
-from modules.errors import print_error
 from modules.hypernetworks import hypernetwork
 
 import ldm.modules.attention
@@ -139,7 +138,7 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
         import xformers.ops
         shared.xformers_available = True
     except Exception:
-        print_error("Cannot import xformers", exc_info=True)
+        errors.report("Cannot import xformers", exc_info=True)
 
 
 def get_available_vram():

+ 3 - 4
modules/textual_inversion/textual_inversion.py

@@ -12,9 +12,8 @@ import numpy as np
 from PIL import Image, PngImagePlugin
 from torch.utils.tensorboard import SummaryWriter
 
-from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint
+from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors
 import modules.textual_inversion.dataset
-from modules.errors import print_error
 from modules.textual_inversion.learn_schedule import LearnRateScheduler
 
 from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay
@@ -219,7 +218,7 @@ class EmbeddingDatabase:
 
                     self.load_from_file(fullfn, fn)
                 except Exception:
-                    print_error(f"Error loading embedding {fn}", exc_info=True)
+                    errors.report(f"Error loading embedding {fn}", exc_info=True)
                     continue
 
     def load_textual_inversion_embeddings(self, force_reload=False):
@@ -643,7 +642,7 @@ Last saved image: {html.escape(last_saved_image)}<br/>
         filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
         save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
     except Exception:
-        print_error("Error training embedding", exc_info=True)
+        errors.report("Error training embedding", exc_info=True)
     finally:
         pbar.leave = False
         pbar.close()

+ 3 - 4
modules/ui.py

@@ -12,8 +12,7 @@ import numpy as np
 from PIL import Image, PngImagePlugin  # noqa: F401
 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
 
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave
-from modules.errors import print_error
+from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors
 from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
 from modules.paths import script_path, data_path
 
@@ -232,7 +231,7 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
 
         except json.decoder.JSONDecodeError:
             if gen_info_string:
-                print_error(f"Error parsing JSON generation info: {gen_info_string}")
+                errors.report(f"Error parsing JSON generation info: {gen_info_string}")
 
         return [res, gr_show(False)]
 
@@ -1752,7 +1751,7 @@ def create_ui():
             try:
                 results = modules.extras.run_modelmerger(*args)
             except Exception as e:
-                print_error("Error loading/saving model file", exc_info=True)
+                errors.report("Error loading/saving model file", exc_info=True)
                 modules.sd_models.list_models()  # to remove the potentially missing models from the list
                 return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
             return results

+ 3 - 4
modules/ui_extensions.py

@@ -11,8 +11,7 @@ import html
 import shutil
 import errno
 
-from modules import extensions, shared, paths, config_states
-from modules.errors import print_error
+from modules import extensions, shared, paths, config_states, errors
 from modules.paths_internal import config_states_dir
 from modules.call_queue import wrap_gradio_gpu_call
 
@@ -45,7 +44,7 @@ def apply_and_restart(disable_list, update_list, disable_all):
         try:
             ext.fetch_and_reset_hard()
         except Exception:
-            print_error(f"Error getting updates for {ext.name}", exc_info=True)
+            errors.report(f"Error getting updates for {ext.name}", exc_info=True)
 
     shared.opts.disabled_extensions = disabled
     shared.opts.disable_all_extensions = disable_all
@@ -111,7 +110,7 @@ def check_updates(id_task, disable_list):
             if 'FETCH_HEAD' not in str(e):
                 raise
         except Exception:
-            print_error(f"Error checking updates for {ext.name}", exc_info=True)
+            errors.report(f"Error checking updates for {ext.name}", exc_info=True)
 
         shared.state.nextjob()
 

+ 2 - 3
scripts/prompts_from_file.py

@@ -5,8 +5,7 @@ import shlex
 import modules.scripts as scripts
 import gradio as gr
 
-from modules import sd_samplers
-from modules.errors import print_error
+from modules import sd_samplers, errors
 from modules.processing import Processed, process_images
 from modules.shared import state
 
@@ -135,7 +134,7 @@ class Script(scripts.Script):
                 try:
                     args = cmdargs(line)
                 except Exception:
-                    print_error(f"Error parsing line {line} as commandline", exc_info=True)
+                    errors.report(f"Error parsing line {line} as commandline", exc_info=True)
                     args = {"prompt": line}
             else:
                 args = {"prompt": line}