Compare commits


3 Commits

Author SHA1 Message Date
w-e-w 6aa7925d36 lint 2024-11-23 19:12:13 +09:00
w-e-w a57adde5ae sort TI hashes 2024-11-23 19:09:09 +09:00
w-e-w e72a6c411a fix missing infotext caused by conds cache
some generation params, such as TI hashes or Emphasis, are added in sd_hijack / sd_hijack_clip
if conds are fetched from cache, sd_hijack_clip will not be executed, and it won't have a chance to add generation params

the generation params will also be missing in non-low-vram mode, because hijack.extra_generation_params was never read after calculate_hr_conds
2024-11-23 19:09:09 +09:00
28 changed files with 145 additions and 254 deletions
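For context, the failure mode described in the commit message can be reduced to a few lines: a value written into hijack.extra_generation_params as a side effect of prompt encoding is lost whenever the conds cache returns a stored result, because the encoder never runs. A minimal sketch of the bug, with illustrative names rather than the webui's real API:

# Minimal sketch of the conds-cache infotext bug (names are illustrative).
extra_generation_params = {}   # written by the text encoder as a side effect

def encode(prompt):
    extra_generation_params["TI hashes"] = "embed: deadbeef"
    return f"cond({prompt})"

cache = {}

def get_cond_cached(prompt):
    if prompt not in cache:
        cache[prompt] = encode(prompt)   # side effect fires only on a miss
    return cache[prompt]

get_cond_cached("a cat")            # first run: params are populated
extra_generation_params.clear()     # infotext params are consumed per run
get_cond_cached("a cat")            # second run: cache hit, encoder skipped
print(extra_generation_params)      # {} -> "TI hashes" is missing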
-1
@@ -88,7 +88,6 @@ module.exports = {
// imageviewer.js
modalPrevImage: "readonly",
modalNextImage: "readonly",
updateModalImageIfVisible: "readonly",
// localStorage.js
localSet: "readonly",
localGet: "readonly",
+1 -1
@@ -22,7 +22,7 @@ jobs:
- name: Install Ruff
run: pip install ruff==0.3.3
- name: Run Ruff
run: ruff check .
run: ruff .
lint-js:
name: eslint
runs-on: ubuntu-latest
+1 -1
@@ -133,7 +133,7 @@ If your system is very new, you need to install python3.11 or python3.10:
# Ubuntu 24.04
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt update
sudo apt install python3.11 python3.11-venv
sudo apt install python3.11
# Manjaro/Arch
sudo pacman -S yay
@@ -1,69 +1,36 @@
// Stable Diffusion WebUI - Bracket Checker
// By @Bwin4L, @akx, @w-e-w, @Haoming02
// Stable Diffusion WebUI - Bracket checker
// By Hingashi no Florin/Bwin4L & @akx
// Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs.
// If there's a mismatch, the keyword counter turns red, and if you hover on it, a tooltip tells you what's wrong.
function checkBrackets(textArea, counterElem) {
const pairs = [
['(', ')', 'round brackets'],
['[', ']', 'square brackets'],
['{', '}', 'curly brackets']
];
// If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong.
function checkBrackets(textArea, counterElt) {
const counts = {};
const errors = new Set();
let i = 0;
textArea.value.matchAll(/(?<!\\)(?:\\\\)*?([(){}[\]])/g).forEach(bracket => {
counts[bracket[1]] = (counts[bracket[1]] || 0) + 1;
});
const errors = [];
while (i < textArea.value.length) {
let char = textArea.value[i];
let escaped = false;
while (char === '\\' && i + 1 < textArea.value.length) {
escaped = !escaped;
i++;
char = textArea.value[i];
}
if (escaped) {
i++;
continue;
}
for (const [open, close, label] of pairs) {
if (char === open) {
counts[label] = (counts[label] || 0) + 1;
} else if (char === close) {
counts[label] = (counts[label] || 0) - 1;
if (counts[label] < 0) {
errors.add(`Incorrect order of ${label}.`);
}
}
}
i++;
}
for (const [open, close, label] of pairs) {
if (counts[label] == undefined) {
continue;
}
if (counts[label] > 0) {
errors.add(`${open} ... ${close} - Detected ${counts[label]} more opening than closing ${label}.`);
} else if (counts[label] < 0) {
errors.add(`${open} ... ${close} - Detected ${-counts[label]} more closing than opening ${label}.`);
function checkPair(open, close, kind) {
if (counts[open] !== counts[close]) {
errors.push(
`${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.`
);
}
}
counterElem.title = [...errors].join('\n');
counterElem.classList.toggle('error', errors.size !== 0);
checkPair('(', ')', 'round brackets');
checkPair('[', ']', 'square brackets');
checkPair('{', '}', 'curly brackets');
counterElt.title = errors.join('\n');
counterElt.classList.toggle('error', errors.length !== 0);
}
function setupBracketChecking(id_prompt, id_counter) {
const textarea = gradioApp().querySelector(`#${id_prompt} > label > textarea`);
const counter = gradioApp().getElementById(id_counter);
var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
var counter = gradioApp().getElementById(id_counter);
if (textarea && counter) {
onEdit(`${id_prompt}_BracketChecking`, textarea, 400, () => checkBrackets(textarea, counter));
textarea.addEventListener("input", () => checkBrackets(textarea, counter));
}
}
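The rewritten checker counts only unescaped brackets via the regex (?<!\\)(?:\\\\)*?([(){}[\]]): a bracket matches only when preceded by an even number of backslashes. Python's re module supports the same constructs, so the matching rule can be sanity-checked in isolation (a sketch, not code from the repository):

import re

# A bracket counts only when preceded by an even number of backslashes,
# i.e. when it is not escaped.
BRACKET = re.compile(r'(?<!\\)(?:\\\\)*?([(){}[\]])')

def bracket_counts(text):
    counts = {}
    for m in BRACKET.finditer(text):
        counts[m.group(1)] = counts.get(m.group(1), 0) + 1
    return counts

print(bracket_counts(r'(masterpiece) \(escaped\) \\(unescaped)'))
# {'(': 2, ')': 2} -- the \( ... \) pair is ignored, \\( counts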
+1 -1
@@ -1,5 +1,5 @@
<div>
<a href="{api_docs}" target="_blank">API</a>
<a href="{api_docs}">API</a>
 • 
<a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">GitHub</a>
 • 
-2
@@ -54,7 +54,6 @@ function updateOnBackgroundChange() {
updateModalImage();
}
}
const updateModalImageIfVisible = updateOnBackgroundChange;
function modalImageSwitch(offset) {
var galleryButtons = all_gallery_buttons();
@@ -165,7 +164,6 @@ function modalLivePreviewToggle(event) {
const modalToggleLivePreview = gradioApp().getElementById("modal_toggle_live_preview");
opts.js_live_preview_in_modal_lightbox = !opts.js_live_preview_in_modal_lightbox;
modalToggleLivePreview.innerHTML = opts.js_live_preview_in_modal_lightbox ? "&#x1F5C7;" : "&#x1F5C6;";
updateModalImageIfVisible();
event.stopPropagation();
}
+1 -1
@@ -190,7 +190,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
livePreview.className = 'livePreview';
gallery.insertBefore(livePreview, gallery.firstElementChild);
}
updateModalImageIfVisible();
livePreview.appendChild(img);
if (livePreview.childElementCount > 2) {
livePreview.removeChild(livePreview.firstElementChild);
-5
@@ -6,11 +6,6 @@ git = launch_utils.git
index_url = launch_utils.index_url
dir_repos = launch_utils.dir_repos
if args.uv:
from modules.uv_hook import patch
patch()
commit_hash = launch_utils.commit_hash
git_tag = launch_utils.git_tag
-1
@@ -126,4 +126,3 @@ parser.add_argument("--skip-load-model-at-start", action='store_true', help="if
parser.add_argument("--unix-filenames-sanitization", action='store_true', help="allow any symbols except '/' in filenames. May conflict with your browser and file system")
parser.add_argument("--filenames-max-length", type=int, default=128, help='maximal length of filenames of saved images. If you override it, it can conflict with your file system')
parser.add_argument("--no-prompt-history", action='store_true', help="disable read prompt from last generation feature; settings this argument will not create '--data_path/params.txt' file")
parser.add_argument("--uv", action='store_true', help="use the uv package manager")
+2 -30
@@ -1,7 +1,7 @@
import hashlib
import os.path
from modules import shared, errors
from modules import shared
import modules.cache
dump_cache = modules.cache.dump_cache
@@ -32,7 +32,7 @@ def sha256_from_cache(filename, title, use_addnet_hash=False):
cached_sha256 = hashes[title].get("sha256", None)
cached_mtime = hashes[title].get("mtime", 0)
if ondisk_mtime != cached_mtime or cached_sha256 is None:
if ondisk_mtime > cached_mtime or cached_sha256 is None:
return None
return cached_sha256
@@ -82,31 +82,3 @@ def addnet_hash_safetensors(b):
return hash_sha256.hexdigest()
def partial_hash_from_cache(filename, *, ignore_cache: bool = False, digits: int = 8):
"""old hash that only looks at a small part of the file and is prone to collisions
kept for compatibility, don't use this for new things
"""
try:
filename = str(filename)
mtime = os.path.getmtime(filename)
hashes = cache('partial-hash')
cache_entry = hashes.get(filename, {})
cache_mtime = cache_entry.get("mtime", 0)
cache_hash = cache_entry.get("hash", None)
if mtime == cache_mtime and cache_hash and not ignore_cache:
return cache_hash[0:digits]
with open(filename, 'rb') as file:
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
partial_hash = m.hexdigest()
hashes[filename] = {'mtime': mtime, 'hash': partial_hash}
return partial_hash[0:digits]
except FileNotFoundError:
pass
except Exception:
errors.report(f'Error calculating partial hash for {filename}', exc_info=True)
return 'NOFILE'
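The mtime comparison in this hunk is the subtle part: != re-hashes on any mtime change, while > keeps serving the cached hash when a file's mtime moves backwards (e.g. after restoring from a backup). A simplified sketch of the invalidation logic, not the module's actual code:

import hashlib
import os

def cached_sha256(filename, cache, strict=True):
    # strict=True mirrors `!=` (any mtime change re-hashes);
    # strict=False mirrors `>` (only a newer file re-hashes).
    ondisk_mtime = os.path.getmtime(filename)
    entry = cache.get(filename)
    if entry is not None:
        stale = ondisk_mtime != entry["mtime"] if strict else ondisk_mtime > entry["mtime"]
        if not stale:
            return entry["sha256"]
    with open(filename, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    cache[filename] = {"mtime": ondisk_mtime, "sha256": digest}
    return digest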
-1
@@ -409,7 +409,6 @@ class FilenameGenerator:
'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if (self.p.n_iter == 1 and self.p.batch_size == 1) or self.zip else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
'hasprompt': lambda self, *args: self.hasprompt(*args), # accepts formats:[hasprompt<prompt1|default><prompt2>..]
'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"],
'randn_source': lambda self: opts.data["randn_source"],
'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
'user': lambda self: self.p.user,
'vae_filename': lambda self: self.get_vae_filename(),
+8 -56
@@ -43,7 +43,9 @@ def check_python_version():
supported_minors = [7, 8, 9, 10, 11]
if not (major == 3 and minor in supported_minors):
errors.print_error_explanation(f"""
import modules.errors
modules.errors.print_error_explanation(f"""
INCOMPATIBLE PYTHON VERSION
This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}.
@@ -313,43 +315,9 @@ def requirements_met(requirements_file):
return True
def get_cuda_comp_cap():
"""
Returns float of CUDA Compute Capability using nvidia-smi
Returns 0.0 on error
CUDA Compute Capability
ref https://developer.nvidia.com/cuda-gpus
ref https://en.wikipedia.org/wiki/CUDA
Blackwell consumer GPUs should return 12.0; data-center GPUs should return 10.0
"""
try:
return max(map(float, subprocess.check_output(['nvidia-smi', '--query-gpu=compute_cap', '--format=noheader,csv'], text=True).splitlines()))
except Exception as _:
return 0.0
def early_access_blackwell_wheels():
"""For Blackwell GPUs, use Early Access PyTorch Wheels provided by Nvidia"""
print('deprecated early_access_blackwell_wheels')
if all([
os.environ.get('TORCH_INDEX_URL') is None,
sys.version_info.major == 3,
sys.version_info.minor in (10, 11, 12),
platform.system() == "Windows",
get_cuda_comp_cap() >= 10, # Blackwell
]):
base_repo = 'https://huggingface.co/w-e-w/torch-2.6.0-cu128.nv/resolve/main/'
ea_whl = {
10: f'{base_repo}torch-2.6.0+cu128.nv-cp310-cp310-win_amd64.whl#sha256=fef3de7ce8f4642e405576008f384304ad0e44f7b06cc1aa45e0ab4b6e70490d {base_repo}torchvision-0.20.0a0+cu128.nv-cp310-cp310-win_amd64.whl#sha256=50841254f59f1db750e7348b90a8f4cd6befec217ab53cbb03780490b225abef',
11: f'{base_repo}torch-2.6.0+cu128.nv-cp311-cp311-win_amd64.whl#sha256=6665c36e6a7e79e7a2cb42bec190d376be9ca2859732ed29dd5b7b5a612d0d26 {base_repo}torchvision-0.20.0a0+cu128.nv-cp311-cp311-win_amd64.whl#sha256=bbc0ee4938e35fe5a30de3613bfcd2d8ef4eae334cf8d49db860668f0bb47083',
12: f'{base_repo}torch-2.6.0+cu128.nv-cp312-cp312-win_amd64.whl#sha256=a3197f72379d34b08c4a4bcf49ea262544a484e8702b8c46cbcd66356c89def6 {base_repo}torchvision-0.20.0a0+cu128.nv-cp312-cp312-win_amd64.whl#sha256=235e7be71ac4e75b0f8e817bae4796d7bac8a67146d2037ab96394f2bdc63e6c'
}
return f'pip install {ea_whl.get(sys.version_info.minor)}'
def prepare_environment():
torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu128")
torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.7.0 torchvision==0.22.0 --extra-index-url {torch_index_url}")
torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu121")
torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.1.2 torchvision==0.16.2 --extra-index-url {torch_index_url}")
if args.use_ipex:
if platform.system() == "Windows":
# The "Nuullll/intel-extension-for-pytorch" wheels were built from IPEX source for Intel Arc GPU: https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main
@@ -373,12 +341,12 @@ def prepare_environment():
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
requirements_file_for_npu = os.environ.get('REQS_FILE_FOR_NPU', "requirements_npu.txt")
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.30')
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23.post1')
clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
assets_repo = os.environ.get('ASSETS_REPO', "https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets.git")
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/w-e-w/stablediffusion.git")
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
stable_diffusion_xl_repo = os.environ.get('STABLE_DIFFUSION_XL_REPO', "https://github.com/Stability-AI/generative-models.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
@@ -422,24 +390,8 @@ def prepare_environment():
)
startup_timer.record("torch GPU test")
# Ensure build dependencies are installed before any package that might need them
def ensure_build_dependencies():
"""Ensure essential build tools are available"""
if not is_installed("wheel"):
run_pip("install wheel", "wheel")
# Check setuptools version compatibility
try:
setuptools_version = run(f'"{python}" -c "import setuptools; print(setuptools.__version__)"', None, None).strip()
if setuptools_version >= "70":
run_pip("install setuptools==69.5.1", "setuptools")
except Exception:
# If setuptools check fails, install compatible version
run_pip("install setuptools==69.5.1", "setuptools")
# Install build dependencies early
ensure_build_dependencies()
if not is_installed("clip"):
run_pip(f"install --no-build-isolation {clip_package}", "clip")
run_pip(f"install {clip_package}", "clip")
startup_timer.record("install clip")
if not is_installed("open_clip"):
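The get_cuda_comp_cap helper shown in this hunk derives the GPU generation from nvidia-smi. The probe is self-contained enough to run standalone; this sketch mirrors the call above and assumes nvidia-smi is on PATH:

import subprocess

def cuda_compute_capability() -> float:
    """Highest CUDA compute capability reported by nvidia-smi, 0.0 on error."""
    try:
        out = subprocess.check_output(
            ['nvidia-smi', '--query-gpu=compute_cap', '--format=noheader,csv'],
            text=True,
        )
        return max(map(float, out.splitlines()))
    except Exception:
        return 0.0

print(cuda_compute_capability())   # e.g. 8.9 (Ada), 12.0 (Blackwell consumer)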
+25 -2
@@ -187,6 +187,7 @@ class StableDiffusionProcessing:
cached_uc = [None, None]
cached_c = [None, None]
hijack_generation_params_state_list = []
comments: dict = None
sampler: sd_samplers_common.Sampler | None = field(default=None, init=False)
@@ -480,6 +481,10 @@ class StableDiffusionProcessing:
for cache in caches:
if cache[0] is not None and cached_params == cache[0]:
if len(cache) == 3:
generation_params_state, cached_params_2 = cache[2]
if cached_params == cached_params_2:
self.hijack_generation_params_state_list.extend(generation_params_state)
return cache[1]
cache = caches[0]
@@ -487,9 +492,25 @@ class StableDiffusionProcessing:
with devices.autocast():
cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
generation_params_state = model_hijack.capture_generation_params_state()
self.hijack_generation_params_state_list.extend(generation_params_state)
if len(cache) == 2:
cache.append((generation_params_state, cached_params))
else:
cache[2] = (generation_params_state, cached_params)
cache[0] = cached_params
return cache[1]
def apply_hijack_generation_params(self):
self.extra_generation_params.update(model_hijack.extra_generation_params)
for func in self.hijack_generation_params_state_list:
try:
func(self.extra_generation_params)
except Exception:
errors.report('Failed to apply hijack generation params state', exc_info=True)
self.hijack_generation_params_state_list.clear()
def setup_conds(self):
prompts = prompt_parser.SdConditioning(self.prompts, width=self.width, height=self.height)
negative_prompts = prompt_parser.SdConditioning(self.negative_prompts, width=self.width, height=self.height, is_negative_prompt=True)
@@ -502,6 +523,8 @@ class StableDiffusionProcessing:
self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data)
self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data)
self.apply_hijack_generation_params()
def get_conds(self):
return self.c, self.uc
@@ -965,8 +988,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
p.setup_conds()
p.extra_generation_params.update(model_hijack.extra_generation_params)
# params.txt should be saved after scripts.process_batch, since the
# infotext could be modified by that callback
# Example: a wildcard processed by process_batch sets an extra model
@@ -1513,6 +1534,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.firstpass_steps, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data, total_steps)
self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.firstpass_steps, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data, total_steps)
self.apply_hijack_generation_params()
def setup_conds(self):
if self.is_hr_pass:
# if we are in hr pass right now, the call is being made from the refiner, and we don't need to setup firstpass cons or switch model
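The processing.py changes are the core of the fix: each conds cache entry gains a third slot holding the generation-params state captured from the hijack, keyed by the same cached_params, and apply_hijack_generation_params() replays that state after setup_conds() whether or not the encoder ran. A self-contained sketch of the scheme, with illustrative stand-ins for the webui objects:

pending_state = []        # stands in for p.hijack_generation_params_state_list

def ti_hashes_writer(params):          # stands in for a GenerationParamsState
    params["TI hashes"] = "embed: deadbeef"

def encode(prompt):
    return f"cond({prompt})", [ti_hashes_writer]   # conds + captured state

cache = []                # [cached_params, conds, (state, cached_params)]

def get_conds_with_caching(prompt):
    if cache and cache[0] == prompt:
        if len(cache) == 3 and cache[2][1] == prompt:
            pending_state.extend(cache[2][0])      # replay state on a hit
        return cache[1]
    conds, state = encode(prompt)
    pending_state.extend(state)
    cache[:] = [prompt, conds, (state, prompt)]
    return conds

def apply_hijack_generation_params(extra):
    for func in pending_state:
        func(extra)
    pending_state.clear()

for _ in range(2):                     # second iteration is a cache hit
    get_conds_with_caching("a cat")
    extra = {}
    apply_hijack_generation_params(extra)
    print(extra)                       # both runs: {'TI hashes': 'embed: deadbeef'}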
+8
@@ -6,6 +6,7 @@ from modules import devices, sd_hijack_optimizations, shared, script_callbacks,
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, xlmr_m18
from modules.util import GenerationParamsState
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
@@ -321,6 +322,13 @@ class StableDiffusionModelHijack:
self.comments = []
self.extra_generation_params = {}
def capture_generation_params_state(self):
state = []
for key in list(self.extra_generation_params):
if isinstance(self.extra_generation_params[key], GenerationParamsState):
state.append(self.extra_generation_params.pop(key))
return state
def get_prompt_lengths(self, text):
if self.clip is None:
return "-", "-"
+29 -6
@@ -3,8 +3,9 @@ from collections import namedtuple
import torch
from modules import prompt_parser, devices, sd_hijack, sd_emphasis
from modules import prompt_parser, devices, sd_hijack, sd_emphasis, util
from modules.shared import opts
from modules.util import GenerationParamsState
class PromptChunk:
@@ -27,6 +28,31 @@ chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenC
are applied by sd_hijack.EmbeddingsWithFixes's forward function."""
class EmbeddingHashes(GenerationParamsState):
def __init__(self, hashes: list):
super().__init__()
self.hashes = hashes
def __call__(self, extra_generation_params):
unique_hashes = dict.fromkeys(self.hashes)
if existing_ti_hashes := extra_generation_params.get('TI hashes'):
unique_hashes.update(dict.fromkeys(existing_ti_hashes.split(', ')))
extra_generation_params['TI hashes'] = ', '.join(sorted(unique_hashes, key=util.natural_sort_key))
class EmphasisMode(GenerationParamsState):
def __init__(self, texts):
super().__init__()
if opts.emphasis != 'Original' and any(x for x in texts if '(' in x or '[' in x):
self.emphasis = opts.emphasis
else:
self.emphasis = None
def __call__(self, extra_generation_params):
if self.emphasis:
extra_generation_params['Emphasis'] = self.emphasis
class TextConditionalModel(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -238,12 +264,9 @@ class TextConditionalModel(torch.nn.Module):
hashes.append(f"{name}: {shorthash}")
if hashes:
if self.hijack.extra_generation_params.get("TI hashes"):
hashes.append(self.hijack.extra_generation_params.get("TI hashes"))
self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)
self.hijack.extra_generation_params["TI hashes"] = EmbeddingHashes(hashes)
if any(x for x in texts if "(" in x or "[" in x) and opts.emphasis != "Original":
self.hijack.extra_generation_params["Emphasis"] = opts.emphasis
self.hijack.extra_generation_params["Emphasis"] = EmphasisMode(texts)
if self.return_pooled:
return torch.hstack(zs), zs[0].pooled
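EmbeddingHashes defers the "TI hashes" infotext entry until apply time, where it dedupes via dict.fromkeys, merges any hashes already present, and sorts naturally, which is what the "sort TI hashes" commit adds. A sketch of that merge, with a simplified stand-in for modules.util.natural_sort_key:

import re

def natural_sort_key(s):
    # simplified stand-in for modules.util.natural_sort_key
    return [int(t) if t.isdigit() else t.lower() for t in re.split(r'(\d+)', s)]

def merge_ti_hashes(new_hashes, extra_generation_params):
    unique = dict.fromkeys(new_hashes)             # dedupe, keep order
    if existing := extra_generation_params.get('TI hashes'):
        unique.update(dict.fromkeys(existing.split(', ')))
    extra_generation_params['TI hashes'] = ', '.join(sorted(unique, key=natural_sort_key))

params = {'TI hashes': 'emb10: aa11'}
merge_ti_hashes(['emb2: bb22', 'emb10: aa11'], params)
print(params['TI hashes'])   # emb2: bb22, emb10: aa11 -- deduped, natural order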
+1 -1
@@ -54,7 +54,7 @@ class SdOptimizationXformers(SdOptimization):
priority = 100
def is_available(self):
return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.cuda.is_available() and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (12, 0))
return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.cuda.is_available() and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0))
def apply(self):
ldm.modules.attention.CrossAttention.forward = xformers_attention_forward
+16 -2
@@ -13,7 +13,6 @@ from urllib import request
import ldm.modules.midas as midas
from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches
from modules.hashes import partial_hash_from_cache as model_hash # noqa: F401 for backwards compatibility
from modules.timer import Timer
from modules.shared import opts
import tomesd
@@ -88,7 +87,7 @@ class CheckpointInfo:
self.name = name
self.name_for_extra = os.path.splitext(os.path.basename(filename))[0]
self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
self.hash = hashes.partial_hash_from_cache(filename)
self.hash = model_hash(filename)
self.sha256 = hashes.sha256_from_cache(self.filename, f"checkpoint/{name}")
self.shorthash = self.sha256[0:10] if self.sha256 else None
@@ -201,6 +200,21 @@ def get_closet_checkpoint_match(search_string):
return None
def model_hash(filename):
"""old hash that only looks at a small part of the file and is prone to collisions"""
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
"""Raises `FileNotFoundError` if no checkpoints are found."""
model_checkpoint = shared.opts.sd_model_checkpoint
+4 -7
@@ -117,15 +117,12 @@ def ddim_scheduler(n, sigma_min, sigma_max, inner_model, device):
def beta_scheduler(n, sigma_min, sigma_max, inner_model, device):
# From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)
# From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024) """
alpha = shared.opts.beta_dist_alpha
beta = shared.opts.beta_dist_beta
curve = [stats.beta.ppf(x, alpha, beta) for x in np.linspace(1, 0, n)]
start = inner_model.sigma_to_t(torch.tensor(sigma_max))
end = inner_model.sigma_to_t(torch.tensor(sigma_min))
timesteps = [end + x * (start - end) for x in curve]
sigmas = [inner_model.t_to_sigma(ts) for ts in timesteps]
timesteps = 1 - np.linspace(0, 1, n)
timesteps = [stats.beta.ppf(x, alpha, beta) for x in timesteps]
sigmas = [sigma_min + (x * (sigma_max-sigma_min)) for x in timesteps]
sigmas += [0.0]
return torch.FloatTensor(sigmas).to(device)
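Both variants of beta_scheduler warp the step positions through the inverse CDF (ppf) of a Beta(alpha, beta) distribution; they differ in whether the warped values are mapped through the model's t-to-sigma conversion or interpolated linearly between sigma_min and sigma_max. The linear variant needs no model and can be sketched directly (scipy and torch assumed available, as in the repo):

import numpy as np
import torch
from scipy import stats

def beta_sigmas(n, sigma_min, sigma_max, alpha=0.6, beta=0.6):
    # Beta ppf warps n uniform steps; alpha=beta=0.6 front/back-loads them.
    timesteps = [stats.beta.ppf(x, alpha, beta) for x in 1 - np.linspace(0, 1, n)]
    sigmas = [sigma_min + x * (sigma_max - sigma_min) for x in timesteps] + [0.0]
    return torch.FloatTensor(sigmas)

print(beta_sigmas(5, 0.03, 14.6))   # monotonically decreasing, ends at 0.0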
+1 -1
@@ -125,7 +125,7 @@ def ui_reorder_categories():
def callbacks_order_settings():
options = {
"callbacks_order_explanation": OptionHTML("""
"sd_vae_explanation": OptionHTML("""
For categories below, callbacks added to dropdowns happen before others, in order listed.
"""),
+4 -5
@@ -33,12 +33,12 @@ categories.register_category("training", "Training")
options_templates.update(options_section(('saving-images', "Saving images/grids", "saving"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images', ui_components.DropdownEditable, {"choices": ("png", "jpg", "jpeg", "webp", "avif")}).info("manual input of <a href='https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html' target='_blank'>other formats</a> is possible, but compatibility is not guaranteed"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
"save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
"save_images_replace_action": OptionInfo("Replace", "Saving the image to an existing file", gr.Radio, {"choices": ["Replace", "Add number suffix"], **hide_dirs}),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids', ui_components.DropdownEditable, {"choices": ("png", "jpg", "jpeg", "webp", "avif")}).info("manual input of <a href='https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html' target='_blank'>other formats</a> is possible, but compatibility is not guaranteed"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
@@ -128,7 +128,6 @@ options_templates.update(options_section(('system', "System", "system"), {
"disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"),
"hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."),
"dump_stacks_on_signal": OptionInfo(False, "Print stack traces before exiting the program with ctrl+c."),
"concurrent_git_fetch_limit": OptionInfo(16, "Number of simultaneous extension update checks ", gr.Slider, {"step": 1, "minimum": 1, "maximum": 100}).info("reduce extension update check time"),
}))
options_templates.update(options_section(('profiler', "Profiler", "system"), {
@@ -407,8 +406,8 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'),
'sd_noise_schedule': OptionInfo("Default", "Noise schedule for sampling", gr.Radio, {"choices": ["Default", "Zero Terminal SNR"]}, infotext="Noise Schedule").info("for use with zero terminal SNR trained models"),
'skip_early_cond': OptionInfo(0.0, "Ignore negative prompt during early sampling", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext="Skip Early CFG").info("disables CFG on a proportion of steps at the beginning of generation; 0=skip none; 1=skip all; can both improve sample diversity/quality and speed up sampling; XYZ plot: Skip Early CFG"),
'beta_dist_alpha': OptionInfo(0.6, "Beta scheduler - alpha", gr.Slider, {"minimum": 0.01, "maximum": 5.0, "step": 0.01}, infotext='Beta scheduler alpha').info('Default = 0.6; the alpha parameter of the beta distribution used in Beta sampling'),
'beta_dist_beta': OptionInfo(0.6, "Beta scheduler - beta", gr.Slider, {"minimum": 0.01, "maximum": 5.0, "step": 0.01}, infotext='Beta scheduler beta').info('Default = 0.6; the beta parameter of the beta distribution used in Beta sampling'),
'beta_dist_alpha': OptionInfo(0.6, "Beta scheduler - alpha", gr.Slider, {"minimum": 0.01, "maximum": 1.0, "step": 0.01}, infotext='Beta scheduler alpha').info('Default = 0.6; the alpha parameter of the beta distribution used in Beta sampling'),
'beta_dist_beta': OptionInfo(0.6, "Beta scheduler - beta", gr.Slider, {"minimum": 0.01, "maximum": 1.0, "step": 0.01}, infotext='Beta scheduler beta').info('Default = 0.6; the beta parameter of the beta distribution used in Beta sampling'),
}))
options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), {
+4 -11
@@ -1,6 +1,5 @@
import json
import os
from concurrent.futures import ThreadPoolExecutor
import threading
import time
from datetime import datetime, timezone
@@ -107,24 +106,18 @@ def check_updates(id_task, disable_list):
exts = [ext for ext in extensions.extensions if ext.remote is not None and ext.name not in disabled]
shared.state.job_count = len(exts)
lock = threading.Lock()
for ext in exts:
shared.state.textinfo = ext.name
def _check_update(ext):
try:
ext.check_updates()
except FileNotFoundError as e:
if 'FETCH_HEAD' not in str(e):
raise
except Exception:
with lock:
errors.report(f"Error checking updates for {ext.name}", exc_info=True)
with lock:
shared.state.textinfo = ext.name
shared.state.nextjob()
errors.report(f"Error checking updates for {ext.name}", exc_info=True)
with ThreadPoolExecutor(max_workers=max(1, int(shared.opts.concurrent_git_fetch_limit))) as executor:
for ext in exts:
executor.submit(_check_update, ext)
shared.state.nextjob()
return extension_table(), ""
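This hunk replaces the sequential update loop with a bounded thread pool (size controlled by the concurrent_git_fetch_limit option from the shared_options hunk above), serializing error reporting and progress updates behind a lock. The generic pattern, sketched outside the webui:

import threading
from concurrent.futures import ThreadPoolExecutor

def check_all(items, worker, max_workers=16):
    lock = threading.Lock()

    def task(item):
        try:
            worker(item)
        except Exception:
            with lock:                     # keep error output readable
                print(f"error checking {item!r}")
        with lock:                         # progress, akin to state.nextjob()
            print(f"done: {item!r}")

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for item in items:
            executor.submit(task, item)    # pool is joined on exit

check_all(["ext-a", "ext-b", "ext-c"], lambda name: None)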
+15 -1
@@ -1,4 +1,3 @@
from __future__ import annotations
import os
import re
@@ -289,3 +288,18 @@ def compare_sha256(file_path: str, hash_prefix: str) -> bool:
for chunk in iter(lambda: f.read(blksize), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest().startswith(hash_prefix.strip().lower())
class GenerationParamsState:
"""A custom class used in StableDiffusionModelHijack for assigning extra_generation_params
generation_params assigned using this class will work properly with StableDiffusionProcessing.get_conds_with_caching()
if assigned directly the generation_params will not be populated if conds cache is used
Generation_params of this class will be captured (see StableDiffusionModelHijack.capture_generation_params_state) and stored with conds cache, and will be extracted in StableDiffusionProcessing.apply_hijack_generation_params()
To use this class, create a subclass with a __call__ method that takes extra_generation_params: dict as input
Example usage: sd_hijack_clip.EmbeddingHashes, sd_hijack_clip.EmphasisMode
"""
def __call__(self, extra_generation_params: dict):
raise NotImplementedError
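For illustration, a minimal hypothetical subclass of GenerationParamsState; the real examples are EmbeddingHashes and EmphasisMode in sd_hijack_clip above:

class SchedulerNote(GenerationParamsState):
    """Hypothetical deferred infotext writer; not part of the repository."""
    def __init__(self, note: str):
        self.note = note

    def __call__(self, extra_generation_params: dict):
        extra_generation_params['Scheduler note'] = self.note

# assigned during hijack:
#   model_hijack.extra_generation_params['Scheduler note'] = SchedulerNote('custom')
# replayed later by StableDiffusionProcessing.apply_hijack_generation_params()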
-50
@@ -1,50 +0,0 @@
import sys
import copy
import shlex
import subprocess
from functools import wraps
BAD_FLAGS = ("--prefer-binary", '-I', '--ignore-installed')
def patch():
if hasattr(subprocess, "__original_run"):
return
print("using uv")
try:
subprocess.run(['uv', '-V'])
except FileNotFoundError:
subprocess.run([sys.executable, '-m', 'pip', 'install', 'uv'])
subprocess.__original_run = subprocess.run
@wraps(subprocess.__original_run)
def patched_run(*args, **kwargs):
_kwargs = copy.copy(kwargs)
if args:
command, *_args = args
else:
command, _args = _kwargs.pop("args", ""), ()
if isinstance(command, str):
command = shlex.split(command)
else:
command = [arg.strip() for arg in command]
if not isinstance(command, list) or "pip" not in command:
return subprocess.__original_run(*args, **kwargs)
cmd = command[command.index("pip") + 1:]
cmd = [arg for arg in cmd if arg not in BAD_FLAGS]
modified_command = ["uv", "pip", *cmd]
cmd_str = shlex.join([*modified_command, *_args])
result = subprocess.__original_run(cmd_str, **_kwargs)
if result.returncode != 0:
return subprocess.__original_run(*args, **kwargs)
return result
subprocess.run = patched_run
+1 -1
@@ -182,7 +182,7 @@ document.addEventListener('keydown', function(e) {
const lightboxModal = document.querySelector('#lightboxModal');
if (!globalPopup || globalPopup.style.display === 'none') {
if (document.activeElement === lightboxModal) return;
if (interruptButton?.style.display === 'block') {
if (interruptButton.style.display === 'block') {
interruptButton.click();
e.preventDefault();
}
-4
@@ -29,10 +29,6 @@ class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing
res = Image.fromarray(restored_img)
if codeformer_visibility < 1.0:
if pp.image.size != res.size:
res = res.resize(pp.image.size)
if pp.image.mode != res.mode:
res = res.convert(pp.image.mode)
res = Image.blend(pp.image, res, codeformer_visibility)
pp.image = res
-4
@@ -26,10 +26,6 @@ class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
res = Image.fromarray(restored_img)
if gfpgan_visibility < 1.0:
if pp.image.size != res.size:
res = res.resize(pp.image.size)
if pp.image.mode != res.mode:
res = res.convert(pp.image.mode)
res = Image.blend(pp.image, res, gfpgan_visibility)
pp.image = res
+1 -4
@@ -480,10 +480,8 @@ div.toprow-compact-tools{
}
#settings_result{
min-height: 1.4em;
height: 1.4em;
margin: 0 1.2em;
max-height: calc(var(--text-md) * var(--line-sm) * 5);
overflow-y: auto;
}
table.popup-table{
@@ -602,7 +600,6 @@ table.popup-table .link{
background: var(--background-fill-primary);
width: 100%;
height: 100%;
pointer-events: none;
}
.livePreview img{
+1 -1
@@ -127,7 +127,7 @@ then
fi
# Check prerequisites
gpu_info=$(lspci 2>/dev/null | grep -E "VGA|Display|CMP")
gpu_info=$(lspci 2>/dev/null | grep -E "VGA|Display")
case "$gpu_info" in
*"Navi 1"*)
export HSA_OVERRIDE_GFX_VERSION=10.3.0