Signed-off-by: vladmandic <mandic00@live.com>
This commit is contained in:
vladmandic
2026-04-02 20:15:30 +02:00
parent 668a94141d
commit 9d0ecde462
11 changed files with 92 additions and 6 deletions

View File

@@ -1,5 +1,13 @@
# Change Log for SD.Next
## Update for 2026-04-02
- **Models**
- [AiArtLab SDXS-1B](https://huggingface.co/AiArtLab/sdxs-1b) Simple Diffusion XS *(training still in progress)*
this model combines a Qwen3.5-1.8B text encoder with an SDXL-style UNet of only 1.6B parameters and a custom 32-channel VAE (a minimal loading sketch follows below)
- **Internal**
- additional typing and typechecks, thanks @awsr
## Update for 2026-04-01
### Highlights for 2026-04-01

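For reference, a minimal sketch of loading this model outside of SD.Next, mirroring the `from_pretrained` call in the new `pipelines/model_sdxs.py` further down in this commit; the dtype and device are assumptions, and the actual pipeline class is resolved from the model repo via `trust_remote_code`:

```python
# minimal loading sketch, assuming a CUDA device with bfloat16 support;
# the pipeline implementation itself ships with the model repo
import torch
import diffusers

pipe = diffusers.DiffusionPipeline.from_pretrained(
    "AiArtLab/sdxs-1b",          # Qwen3.5-1.8B text encoder + SDXL-style UNet + 32ch VAE
    torch_dtype=torch.bfloat16,  # assumption: adjust to your hardware
    trust_remote_code=True,      # the pipeline class is not part of diffusers itself
)
pipe.to("cuda")                  # SD.Next applies balanced offload here instead of a plain .to()
```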
View File

@@ -864,6 +864,16 @@
"extras": "sampler: Default, cfg_scale: 1.5, steps: 50",
"size": 15.3,
"date": "2025 January"
},
"AiArtLab SDXS-1B": {
"path": "AiArtLab/sdxs-1b",
"preview": "AiArtLab--sdxs-1b.jpg",
"desc": "Simple Diffusion XS (train in progress) combines Qwen3.5-1.8B text encoder with SDXL-style UNET with only 1.6B parameters and custom 32ch VAE",
"skip": true,
"extras": "sampler: Default",
"size": 15.3,
"date": "2026 January"
}
}

View File

View File

@@ -100,6 +100,8 @@ def get_model_type(pipe):
model_type = 'hunyuanimage3'
elif 'HunyuanImage' in name:
model_type = 'hunyuanimage'
elif 'sdxs-1b' in name:
model_type = 'sdxs'
# video models
elif "Kandinsky5" in name and '2V' in name:
model_type = 'kandinsky5video'

View File

@@ -325,8 +325,8 @@ class StableDiffusionProcessing:
# initializers
self.prompt = prompt
self.seed = seed
self.subseed = subseed
self.seed = int(seed)
self.subseed = int(subseed)
self.subseed_strength = subseed_strength
self.seed_resize_from_h = seed_resize_from_h
self.seed_resize_from_w = seed_resize_from_w

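The explicit casts normalize seeds that may arrive as floats or numeric strings from the API or UI; a tiny illustration with hypothetical input values:

```python
# hypothetical inputs: seed values as they might arrive from the API or UI
seed, subseed = 1234.0, "42"
print(int(seed), int(subseed))  # -> 1234 42, normalized before any generator is seeded
```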
View File

@@ -150,6 +150,8 @@ def guess_by_name(fn, current_guess):
new_guess = 'Ovis-Image'
elif 'glm-image' in fn.lower():
new_guess = 'GLM-Image'
elif 'sdxs-1b' in fn.lower():
new_guess = 'SDXS'
if debug_load:
log.trace(f'Autodetect: method=name file="{fn}" previous="{current_guess}" current="{new_guess}"')
return new_guess or current_guess
@@ -166,6 +168,8 @@ def guess_by_diffusers(fn, current_guess):
if name is not None and name in exclude_by_name:
return current_guess, None
cls = index.get('_class_name', None)
if isinstance(cls, list):
cls = cls[-1]
if cls is not None:
pipeline = getattr(diffusers, cls, None)
if pipeline is None:

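The list form of `_class_name` shows up in `model_index.json` for pipelines that ship their own code; a self-contained sketch of the two shapes the guesser now accepts (the index snippets and class names below are hypothetical):

```python
# hypothetical model_index.json snippets: _class_name may be a plain string or,
# for remote-code pipelines, a [module, class] pair - keeping the last element
# gives getattr(diffusers, cls, None) a usable class name in both cases
index_plain = {"_class_name": "StableDiffusionXLPipeline"}
index_remote = {"_class_name": ["pipeline_sdxs", "SdxsPipeline"]}  # hypothetical names

for index in (index_plain, index_remote):
    cls = index.get("_class_name", None)
    if isinstance(cls, list):
        cls = cls[-1]
    print(cls)  # StableDiffusionXLPipeline / SdxsPipeline
```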
View File

@@ -8,7 +8,7 @@ def hijack_encode_prompt(*args, **kwargs):
jobid = shared.state.begin('TE Encode')
t0 = time.time()
if 'max_sequence_length' in kwargs and kwargs['max_sequence_length'] is not None:
kwargs['max_sequence_length'] = max(kwargs['max_sequence_length'], os.environ.get('HIDREAM_MAX_SEQUENCE_LENGTH', 256))
kwargs['max_sequence_length'] = max(kwargs['max_sequence_length'], int(os.environ.get('MAX_SEQUENCE_LENGTH', 256)))
try:
prompt = kwargs.get('prompt', None) or (args[0] if len(args) > 0 else None)
if prompt is not None:
@@ -20,8 +20,6 @@ def hijack_encode_prompt(*args, **kwargs):
res = None
t1 = time.time()
timer.process.add('te', t1-t0)
# if hasattr(shared.sd_model, "maybe_free_model_hooks"):
# shared.sd_model.maybe_free_model_hooks()
shared.sd_model = sd_models.apply_balanced_offload(shared.sd_model)
shared.state.end(jobid)
return res

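With the rename, the override is no longer HiDream-specific; a small illustration of its semantics, assuming the environment value is read as an integer as in the hunk above (512 and 226 are arbitrary example numbers):

```python
# example only: the env var acts as a lower bound on max_sequence_length, never a cap
import os
os.environ["MAX_SEQUENCE_LENGTH"] = "512"

requested = 226  # hypothetical value passed in by a pipeline
effective = max(requested, int(os.environ.get("MAX_SEQUENCE_LENGTH", 256)))
print(effective)  # -> 512
```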
View File

@@ -507,6 +507,10 @@ def load_diffuser_force(detected_model_type, checkpoint_info, diffusers_load_con
from pipelines.model_glm import load_glm_image
sd_model = load_glm_image(checkpoint_info, diffusers_load_config)
allow_post_quant = False
elif model_type in ['SDXS']:
from pipelines.model_sdxs import load_sdxs
sd_model = load_sdxs(checkpoint_info, diffusers_load_config)
allow_post_quant = False
except Exception as e:
log.error(f'Load {op}: path="{checkpoint_info.path}" {e}')
# if debug_load:

View File

@@ -65,6 +65,7 @@ pipelines = {
'HunyuanImage3': getattr(diffusers, 'DiffusionPipeline', None),
'ChronoEdit': getattr(diffusers, 'DiffusionPipeline', None),
'Anima': getattr(diffusers, 'DiffusionPipeline', None),
'SDXS': getattr(diffusers, 'DiffusionPipeline', None),
}

59
pipelines/model_sdxs.py Normal file
View File

@@ -0,0 +1,59 @@
import time
import diffusers
import transformers
from modules import shared, devices, errors, timer, sd_models, model_quant, sd_hijack_vae
from modules.logger import log
from pipelines import generic


def hijack_encode_text(prompt: str | list[str]):
jobid = shared.state.begin('TE Encode')
t0 = time.time()
try:
prompt = shared.sd_model.refine_prompts(prompt)
except Exception as e:
log.error(f'Encode prompt: {e}')
errors.display(e, 'Encode prompt')
try:
res = shared.sd_model.orig_encode_text(prompt)
except Exception as e:
log.error(f'Encode prompt: {e}')
errors.display(e, 'Encode prompt')
res = None
t1 = time.time()
timer.process.add('te', t1-t0)
shared.sd_model = sd_models.apply_balanced_offload(shared.sd_model)
shared.state.end(jobid)
return res


def load_sdxs(checkpoint_info, diffusers_load_config=None):
if diffusers_load_config is None:
diffusers_load_config = {}
repo_id = sd_models.path_to_repo(checkpoint_info)
sd_models.hf_auth_check(checkpoint_info)
load_args, _quant_args = model_quant.get_dit_args(diffusers_load_config, allow_quant=False)
log.debug(f'Load model: type=SDXS repo="{repo_id}" config={diffusers_load_config} offload={shared.opts.diffusers_offload_mode} dtype={devices.dtype} args={load_args}')
text_encoder = generic.load_text_encoder(repo_id, cls_name=transformers.Qwen3_5ForConditionalGeneration, load_config=diffusers_load_config, allow_shared=False)
pipe = diffusers.DiffusionPipeline.from_pretrained(
repo_id,
text_encoder=text_encoder,
cache_dir=shared.opts.diffusers_dir,
trust_remote_code=True,
**load_args,
)
pipe.task_args = {
'generator': None,
'output_type': 'np',
}
pipe.orig_encode_text = pipe.encode_text
pipe.encode_text = hijack_encode_text
sd_hijack_vae.init_hijack(pipe)
del text_encoder
devices.torch_gc(force=True, reason='load')
return pipe

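The loader keeps the pipeline's original text-encoding entry point reachable as `orig_encode_text` and swaps in a wrapper that adds job tracking, timing, and offload handling; a self-contained sketch of that monkey-patch pattern, with `DemoPipeline` as a made-up stand-in for the real SDXS pipeline:

```python
import time

class DemoPipeline:                               # stand-in, not the real SDXS pipeline
    def encode_text(self, prompt):
        return f"embeddings for {prompt!r}"

pipe = DemoPipeline()
pipe.orig_encode_text = pipe.encode_text          # keep the original bound method reachable

def hijacked_encode_text(prompt):
    t0 = time.time()
    res = pipe.orig_encode_text(prompt)           # delegate to the original implementation
    print(f"te encode: {time.time() - t0:.3f}s")  # the real wrapper also re-applies balanced offload here
    return res

pipe.encode_text = hijacked_encode_text           # instance-level override, as done in load_sdxs()
print(pipe.encode_text("a fox"))
```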
2
wiki

Submodule wiki updated: d54ade8e5f...a9b73a5001