Skip to content

Commit d47708b

Browse files
RyanJDick authored and brandonrising committed
Rename LoRAModelRaw to ModelPatchRaw.
1 parent b9854c7 commit d47708b

15 files changed

+52
-50
lines changed

invokeai/app/invocations/compel.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
from invokeai.app.services.shared.invocation_context import InvocationContext
2121
from invokeai.app.util.ti_utils import generate_ti_list
2222
from invokeai.backend.model_patcher import ModelPatcher
23-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
23+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
2424
from invokeai.backend.patches.model_patcher import ModelPatcher
2525
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
2626
BasicConditioningInfo,
@@ -66,10 +66,10 @@ def invoke(self, context: InvocationContext) -> ConditioningOutput:
6666
tokenizer_info = context.models.load(self.clip.tokenizer)
6767
text_encoder_info = context.models.load(self.clip.text_encoder)
6868

69-
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
69+
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
7070
for lora in self.clip.loras:
7171
lora_info = context.models.load(lora.lora)
72-
assert isinstance(lora_info.model, LoRAModelRaw)
72+
assert isinstance(lora_info.model, ModelPatchRaw)
7373
yield (lora_info.model, lora.weight)
7474
del lora_info
7575
return
@@ -162,11 +162,11 @@ def run_clip_compel(
162162
c_pooled = None
163163
return c, c_pooled
164164

165-
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
165+
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
166166
for lora in clip_field.loras:
167167
lora_info = context.models.load(lora.lora)
168168
lora_model = lora_info.model
169-
assert isinstance(lora_model, LoRAModelRaw)
169+
assert isinstance(lora_model, ModelPatchRaw)
170170
yield (lora_model, lora.weight)
171171
del lora_info
172172
return

invokeai/app/invocations/denoise_latents.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
4040
from invokeai.backend.model_manager import BaseModelType, ModelVariantType
4141
from invokeai.backend.model_patcher import ModelPatcher
42-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
42+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
4343
from invokeai.backend.patches.model_patcher import ModelPatcher
4444
from invokeai.backend.stable_diffusion import PipelineIntermediateState
4545
from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext, DenoiseInputs
@@ -987,10 +987,10 @@ def _old_invoke(self, context: InvocationContext) -> LatentsOutput:
987987
def step_callback(state: PipelineIntermediateState) -> None:
988988
context.util.sd_step_callback(state, unet_config.base)
989989

990-
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
990+
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
991991
for lora in self.unet.loras:
992992
lora_info = context.models.load(lora.lora)
993-
assert isinstance(lora_info.model, LoRAModelRaw)
993+
assert isinstance(lora_info.model, ModelPatchRaw)
994994
yield (lora_info.model, lora.weight)
995995
del lora_info
996996
return

invokeai/app/invocations/flux_denoise.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@
4848
from invokeai.backend.flux.text_conditioning import FluxTextConditioning
4949
from invokeai.backend.model_manager.config import ModelFormat
5050
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
51-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
51+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
5252
from invokeai.backend.patches.model_patcher import ModelPatcher
5353
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
5454
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
@@ -697,13 +697,13 @@ def _prep_ip_adapter_extensions(
697697

698698
return pos_ip_adapter_extensions, neg_ip_adapter_extensions
699699

700-
def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[LoRAModelRaw, float]]:
700+
def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]:
701701
loras: list[Union[LoRAField, ControlLoRAField]] = [*self.transformer.loras]
702702
if self.control_lora:
703703
loras.append(self.control_lora)
704704
for lora in loras:
705705
lora_info = context.models.load(lora.lora)
706-
assert isinstance(lora_info.model, LoRAModelRaw)
706+
assert isinstance(lora_info.model, ModelPatchRaw)
707707
yield (lora_info.model, lora.weight)
708708
del lora_info
709709

invokeai/app/invocations/flux_text_encoder.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
from invokeai.backend.flux.modules.conditioner import HFEncoder
2020
from invokeai.backend.model_manager.config import ModelFormat
2121
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
22-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
22+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
2323
from invokeai.backend.patches.model_patcher import ModelPatcher
2424
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo
2525

@@ -130,9 +130,9 @@ def _clip_encode(self, context: InvocationContext) -> torch.Tensor:
130130
assert isinstance(pooled_prompt_embeds, torch.Tensor)
131131
return pooled_prompt_embeds
132132

133-
def _clip_lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[LoRAModelRaw, float]]:
133+
def _clip_lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]:
134134
for lora in self.clip.loras:
135135
lora_info = context.models.load(lora.lora)
136-
assert isinstance(lora_info.model, LoRAModelRaw)
136+
assert isinstance(lora_info.model, ModelPatchRaw)
137137
yield (lora_info.model, lora.weight)
138138
del lora_info

invokeai/app/invocations/sd3_text_encoder.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
from invokeai.app.services.shared.invocation_context import InvocationContext
1919
from invokeai.backend.model_manager.config import ModelFormat
2020
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
21-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
21+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
2222
from invokeai.backend.patches.model_patcher import ModelPatcher
2323
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, SD3ConditioningInfo
2424

@@ -193,9 +193,9 @@ def _clip_encode(
193193

194194
def _clip_lora_iterator(
195195
self, context: InvocationContext, clip_model: CLIPField
196-
) -> Iterator[Tuple[LoRAModelRaw, float]]:
196+
) -> Iterator[Tuple[ModelPatchRaw, float]]:
197197
for lora in clip_model.loras:
198198
lora_info = context.models.load(lora.lora)
199-
assert isinstance(lora_info.model, LoRAModelRaw)
199+
assert isinstance(lora_info.model, ModelPatchRaw)
200200
yield (lora_info.model, lora.weight)
201201
del lora_info

invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
from invokeai.app.invocations.model import UNetField
2323
from invokeai.app.invocations.primitives import LatentsOutput
2424
from invokeai.app.services.shared.invocation_context import InvocationContext
25-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
25+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
2626
from invokeai.backend.patches.model_patcher import ModelPatcher
2727
from invokeai.backend.stable_diffusion.diffusers_pipeline import ControlNetData, PipelineIntermediateState
2828
from invokeai.backend.stable_diffusion.multi_diffusion_pipeline import (
@@ -194,10 +194,10 @@ def step_callback(state: PipelineIntermediateState) -> None:
194194
context.util.sd_step_callback(state, unet_config.base)
195195

196196
# Prepare an iterator that yields the UNet's LoRA models and their weights.
197-
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
197+
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
198198
for lora in self.unet.loras:
199199
lora_info = context.models.load(lora.lora)
200-
assert isinstance(lora_info.model, LoRAModelRaw)
200+
assert isinstance(lora_info.model, ModelPatchRaw)
201201
yield (lora_info.model, lora.weight)
202202
del lora_info
203203

invokeai/backend/model_manager/load/model_util.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
1818
from invokeai.backend.model_manager.config import AnyModel
1919
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
20-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
20+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
2121
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
2222
from invokeai.backend.textual_inversion import TextualInversionModelRaw
2323
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
@@ -43,7 +43,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
4343
(
4444
TextualInversionModelRaw,
4545
IPAdapter,
46-
LoRAModelRaw,
46+
ModelPatchRaw,
4747
SpandrelImageToImageModel,
4848
GroundingDinoPipeline,
4949
SegmentAnythingPipeline,

invokeai/backend/patches/lora_conversions/flux_control_lora_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
from invokeai.backend.patches.layers.lora_layer import LoRALayer
99
from invokeai.backend.patches.layers.set_parameter_layer import SetParameterLayer
1010
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
11-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
11+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
1212

1313
# A regex pattern that matches all of the keys in the Flux Dev/Canny LoRA format.
1414
# Example keys:
@@ -30,7 +30,7 @@ def is_state_dict_likely_flux_control(state_dict: Dict[str, Any]) -> bool:
3030
)
3131

3232

33-
def lora_model_from_flux_control_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAModelRaw:
33+
def lora_model_from_flux_control_state_dict(state_dict: Dict[str, torch.Tensor]) -> ModelPatchRaw:
3434
# converted_state_dict = _convert_lora_bfl_control(state_dict=state_dict)
3535
# Group keys by layer.
3636
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = {}
@@ -72,4 +72,4 @@ def lora_model_from_flux_control_state_dict(state_dict: Dict[str, torch.Tensor])
7272
else:
7373
raise AssertionError(f"{layer_key} not expected")
7474
# Create and return the LoRAModelRaw.
75-
return LoRAModelRaw(layers=layers)
75+
return ModelPatchRaw(layers=layers)

invokeai/backend/patches/lora_conversions/flux_diffusers_lora_conversion_utils.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer
77
from invokeai.backend.patches.layers.lora_layer import LoRALayer
88
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
9-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
9+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
1010

1111

1212
def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Tensor]) -> bool:
@@ -30,7 +30,9 @@ def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Te
3030
return all_keys_in_peft_format and all_expected_keys_present
3131

3232

33-
def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor], alpha: float | None) -> LoRAModelRaw:
33+
def lora_model_from_flux_diffusers_state_dict(
34+
state_dict: Dict[str, torch.Tensor], alpha: float | None
35+
) -> ModelPatchRaw:
3436
"""Loads a state dict in the Diffusers FLUX LoRA format into a LoRAModelRaw object.
3537
3638
This function is based on:
@@ -215,7 +217,7 @@ def add_qkv_lora_layer_if_present(
215217

216218
layers_with_prefix = {f"{FLUX_LORA_TRANSFORMER_PREFIX}{k}": v for k, v in layers.items()}
217219

218-
return LoRAModelRaw(layers=layers_with_prefix)
220+
return ModelPatchRaw(layers=layers_with_prefix)
219221

220222

221223
def _group_by_layer(state_dict: Dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:

invokeai/backend/patches/lora_conversions/flux_kohya_lora_conversion_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
FLUX_LORA_CLIP_PREFIX,
1010
FLUX_LORA_TRANSFORMER_PREFIX,
1111
)
12-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
12+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
1313

1414
# A regex pattern that matches all of the transformer keys in the Kohya FLUX LoRA format.
1515
# Example keys:
@@ -39,7 +39,7 @@ def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> boo
3939
)
4040

4141

42-
def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAModelRaw:
42+
def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -> ModelPatchRaw:
4343
# Group keys by layer.
4444
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = {}
4545
for key, value in state_dict.items():
@@ -71,7 +71,7 @@ def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -
7171
layers[FLUX_LORA_CLIP_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
7272

7373
# Create and return the LoRAModelRaw.
74-
return LoRAModelRaw(layers=layers)
74+
return ModelPatchRaw(layers=layers)
7575

7676

7777
T = TypeVar("T")

invokeai/backend/patches/lora_conversions/sd_lora_conversion_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,17 +4,17 @@
44

55
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
66
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
7-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
7+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
88

99

10-
def lora_model_from_sd_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAModelRaw:
10+
def lora_model_from_sd_state_dict(state_dict: Dict[str, torch.Tensor]) -> ModelPatchRaw:
1111
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = _group_state(state_dict)
1212

1313
layers: dict[str, BaseLayerPatch] = {}
1414
for layer_key, values in grouped_state_dict.items():
1515
layers[layer_key] = any_lora_layer_from_state_dict(values)
1616

17-
return LoRAModelRaw(layers=layers)
17+
return ModelPatchRaw(layers=layers)
1818

1919

2020
def _group_state(state_dict: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, torch.Tensor]]:

invokeai/backend/patches/lora_model_raw.py renamed to invokeai/backend/patches/model_patch_raw.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
from invokeai.backend.raw_model import RawModel
88

99

10-
class LoRAModelRaw(RawModel):
10+
class ModelPatchRaw(RawModel):
1111
def __init__(self, layers: Mapping[str, BaseLayerPatch]):
1212
self.layers = layers
1313

invokeai/backend/patches/model_patcher.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
77
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
8-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
8+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
99
from invokeai.backend.patches.pad_with_zeros import pad_with_zeros
1010
from invokeai.backend.patches.sidecar_wrappers.base_sidecar_wrapper import BaseSidecarWrapper
1111
from invokeai.backend.patches.sidecar_wrappers.utils import wrap_module_with_sidecar_wrapper
@@ -19,7 +19,7 @@ class ModelPatcher:
1919
@contextmanager
2020
def apply_model_patches(
2121
model: torch.nn.Module,
22-
patches: Iterable[Tuple[LoRAModelRaw, float]],
22+
patches: Iterable[Tuple[ModelPatchRaw, float]],
2323
prefix: str,
2424
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
2525
):
@@ -57,7 +57,7 @@ def apply_model_patches(
5757
def apply_model_patch(
5858
model: torch.nn.Module,
5959
prefix: str,
60-
patch: LoRAModelRaw,
60+
patch: ModelPatchRaw,
6161
patch_weight: float,
6262
original_weights: OriginalWeightsStorage,
6363
):
@@ -148,7 +148,7 @@ def _apply_model_layer_patch(
148148
@contextmanager
149149
def apply_model_sidecar_patches(
150150
model: torch.nn.Module,
151-
patches: Iterable[Tuple[LoRAModelRaw, float]],
151+
patches: Iterable[Tuple[ModelPatchRaw, float]],
152152
prefix: str,
153153
dtype: torch.dtype,
154154
):
@@ -189,7 +189,7 @@ def apply_model_sidecar_patches(
189189
@staticmethod
190190
def _apply_model_sidecar_patch(
191191
model: torch.nn.Module,
192-
patch: LoRAModelRaw,
192+
patch: ModelPatchRaw,
193193
patch_weight: float,
194194
prefix: str,
195195
original_modules: dict[str, torch.nn.Module],

invokeai/backend/stable_diffusion/extensions/lora.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
from diffusers import UNet2DConditionModel
77

8-
from invokeai.backend.patches.lora_model_raw import LoRAModelRaw
8+
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
99
from invokeai.backend.patches.model_patcher import ModelPatcher
1010
from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase
1111

@@ -30,7 +30,7 @@ def __init__(
3030
@contextmanager
3131
def patch_unet(self, unet: UNet2DConditionModel, original_weights: OriginalWeightsStorage):
3232
lora_model = self._node_context.models.load(self._model_id).model
33-
assert isinstance(lora_model, LoRAModelRaw)
33+
assert isinstance(lora_model, ModelPatchRaw)
3434
ModelPatcher.apply_model_patch(
3535
model=unet,
3636
prefix="lora_unet_",

0 commit comments

Comments (0)