diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py
index e829829cf36..fe8943bfcd9 100644
--- a/invokeai/app/invocations/compel.py
+++ b/invokeai/app/invocations/compel.py
@@ -95,6 +95,7 @@ def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
ti_manager,
),
):
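+ # Surface a coarse progress message while prompt conditioning is built.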
+ context.util.signal_progress("Building conditioning")
assert isinstance(text_encoder, CLIPTextModel)
assert isinstance(tokenizer, CLIPTokenizer)
compel = Compel(
@@ -191,6 +192,7 @@ def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
ti_manager,
),
):
+ context.util.signal_progress("Building conditioning")
assert isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
assert isinstance(tokenizer, CLIPTokenizer)
diff --git a/invokeai/app/invocations/create_denoise_mask.py b/invokeai/app/invocations/create_denoise_mask.py
index 2d66c20dbd4..15e95f49b01 100644
--- a/invokeai/app/invocations/create_denoise_mask.py
+++ b/invokeai/app/invocations/create_denoise_mask.py
@@ -65,6 +65,7 @@ def invoke(self, context: InvocationContext) -> DenoiseMaskOutput:
img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
# TODO:
+ context.util.signal_progress("Running VAE encoder")
masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())
masked_latents_name = context.tensors.save(tensor=masked_latents)
diff --git a/invokeai/app/invocations/create_gradient_mask.py b/invokeai/app/invocations/create_gradient_mask.py
index 18f586e7f17..d32d3c85212 100644
--- a/invokeai/app/invocations/create_gradient_mask.py
+++ b/invokeai/app/invocations/create_gradient_mask.py
@@ -131,6 +131,7 @@ def invoke(self, context: InvocationContext) -> GradientMaskOutput:
image_tensor = image_tensor.unsqueeze(0)
img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
+ context.util.signal_progress("Running VAE encoder")
masked_latents = ImageToLatentsInvocation.vae_encode(
vae_info, self.fp32, self.tiled, masked_image.clone()
)
diff --git a/invokeai/app/invocations/flux_text_encoder.py b/invokeai/app/invocations/flux_text_encoder.py
index 209988ce807..af250f0f3b1 100644
--- a/invokeai/app/invocations/flux_text_encoder.py
+++ b/invokeai/app/invocations/flux_text_encoder.py
@@ -71,6 +71,7 @@ def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)
+ context.util.signal_progress("Running T5 encoder")
prompt_embeds = t5_encoder(prompt)
assert isinstance(prompt_embeds, torch.Tensor)
@@ -111,6 +112,7 @@ def _clip_encode(self, context: InvocationContext) -> torch.Tensor:
clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)
+ context.util.signal_progress("Running CLIP encoder")
pooled_prompt_embeds = clip_encoder(prompt)
assert isinstance(pooled_prompt_embeds, torch.Tensor)
diff --git a/invokeai/app/invocations/flux_vae_decode.py b/invokeai/app/invocations/flux_vae_decode.py
index 7732f1f9812..362ce78de9d 100644
--- a/invokeai/app/invocations/flux_vae_decode.py
+++ b/invokeai/app/invocations/flux_vae_decode.py
@@ -54,6 +54,7 @@ def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Ima
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)
vae_info = context.models.load(self.vae.vae)
+ context.util.signal_progress("Running VAE")
image = self._vae_decode(vae_info=vae_info, latents=latents)
TorchDevice.empty_cache()
diff --git a/invokeai/app/invocations/flux_vae_encode.py b/invokeai/app/invocations/flux_vae_encode.py
index ab7ed584c0b..c4e8d0e42a2 100644
--- a/invokeai/app/invocations/flux_vae_encode.py
+++ b/invokeai/app/invocations/flux_vae_encode.py
@@ -59,6 +59,7 @@ def invoke(self, context: InvocationContext) -> LatentsOutput:
if image_tensor.dim() == 3:
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
+ context.util.signal_progress("Running VAE")
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
latents = latents.to("cpu")
diff --git a/invokeai/app/invocations/image_to_latents.py b/invokeai/app/invocations/image_to_latents.py
index dadd8bb3a10..d288b8d99bb 100644
--- a/invokeai/app/invocations/image_to_latents.py
+++ b/invokeai/app/invocations/image_to_latents.py
@@ -117,6 +117,7 @@ def invoke(self, context: InvocationContext) -> LatentsOutput:
if image_tensor.dim() == 3:
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
+ context.util.signal_progress("Running VAE encoder")
latents = self.vae_encode(
vae_info=vae_info, upcast=self.fp32, tiled=self.tiled, image_tensor=image_tensor, tile_size=self.tile_size
)
diff --git a/invokeai/app/invocations/latents_to_image.py b/invokeai/app/invocations/latents_to_image.py
index 35b8483f2cc..1cb5ae78e77 100644
--- a/invokeai/app/invocations/latents_to_image.py
+++ b/invokeai/app/invocations/latents_to_image.py
@@ -60,6 +60,7 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae:
+ context.util.signal_progress("Running VAE decoder")
assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
latents = latents.to(vae.device)
if self.fp32:
diff --git a/invokeai/app/invocations/sd3_latents_to_image.py b/invokeai/app/invocations/sd3_latents_to_image.py
index 1b3add597a4..184759b2f02 100644
--- a/invokeai/app/invocations/sd3_latents_to_image.py
+++ b/invokeai/app/invocations/sd3_latents_to_image.py
@@ -47,6 +47,7 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL))
with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae:
+ context.util.signal_progress("Running VAE")
assert isinstance(vae, (AutoencoderKL))
latents = latents.to(vae.device)
diff --git a/invokeai/app/invocations/sd3_text_encoder.py b/invokeai/app/invocations/sd3_text_encoder.py
index 4a909201994..5969eda0955 100644
--- a/invokeai/app/invocations/sd3_text_encoder.py
+++ b/invokeai/app/invocations/sd3_text_encoder.py
@@ -95,6 +95,7 @@ def _t5_encode(self, context: InvocationContext, max_seq_len: int) -> torch.Tens
t5_text_encoder_info as t5_text_encoder,
t5_tokenizer_info as t5_tokenizer,
):
+ context.util.signal_progress("Running T5 encoder")
assert isinstance(t5_text_encoder, T5EncoderModel)
assert isinstance(t5_tokenizer, (T5Tokenizer, T5TokenizerFast))
@@ -137,6 +138,7 @@ def _clip_encode(
clip_tokenizer_info as clip_tokenizer,
ExitStack() as exit_stack,
):
+ context.util.signal_progress("Running CLIP encoder")
assert isinstance(clip_text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
assert isinstance(clip_tokenizer, CLIPTokenizer)
diff --git a/invokeai/app/services/shared/invocation_context.py b/invokeai/app/services/shared/invocation_context.py
index 971fcd4b594..721934cecb1 100644
--- a/invokeai/app/services/shared/invocation_context.py
+++ b/invokeai/app/services/shared/invocation_context.py
@@ -160,6 +160,10 @@ def error(self, message: str) -> None:
class ImagesInterface(InvocationContextInterface):
+ def __init__(self, services: InvocationServices, data: InvocationContextData, util: "UtilInterface") -> None:
+ super().__init__(services, data)
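+ # Keep a reference to the util interface so image saves can emit progress messages.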
+ self._util = util
+
def save(
self,
image: Image,
@@ -186,6 +190,8 @@ def save(
The saved image DTO.
"""
+ self._util.signal_progress("Saving image")
+
# If `metadata` is provided directly, use that. Else, use the metadata provided by `WithMetadata`, falling back to None.
metadata_ = None
if metadata:
@@ -336,6 +342,10 @@ def load(self, name: str) -> ConditioningFieldData:
class ModelsInterface(InvocationContextInterface):
"""Common API for loading, downloading and managing models."""
+ def __init__(self, services: InvocationServices, data: InvocationContextData, util: "UtilInterface") -> None:
+ super().__init__(services, data)
+ self._util = util
+
def exists(self, identifier: Union[str, "ModelIdentifierField"]) -> bool:
"""Check if a model exists.
@@ -368,11 +378,15 @@ def load(
if isinstance(identifier, str):
model = self._services.model_manager.store.get_model(identifier)
- return self._services.model_manager.load.load_model(model, submodel_type)
else:
- _submodel_type = submodel_type or identifier.submodel_type
+ submodel_type = submodel_type or identifier.submodel_type
model = self._services.model_manager.store.get_model(identifier.key)
- return self._services.model_manager.load.load_model(model, _submodel_type)
+
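+ # Report which model (and submodel) is being loaded before the potentially slow load call.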
+ message = f"Loading model {model.name}"
+ if submodel_type:
+ message += f" ({submodel_type.value})"
+ self._util.signal_progress(message)
+ return self._services.model_manager.load.load_model(model, submodel_type)
def load_by_attrs(
self, name: str, base: BaseModelType, type: ModelType, submodel_type: Optional[SubModelType] = None
@@ -397,6 +411,10 @@ def load_by_attrs(
if len(configs) > 1:
raise ValueError(f"More than one model found with name {name}, base {base}, and type {type}")
+ message = f"Loading model {name}"
+ if submodel_type:
+ message += f" ({submodel_type.value})"
+ self._util.signal_progress(message)
return self._services.model_manager.load.load_model(configs[0], submodel_type)
def get_config(self, identifier: Union[str, "ModelIdentifierField"]) -> AnyModelConfig:
@@ -467,6 +485,7 @@ def download_and_cache_model(
Returns:
Path to the downloaded model
"""
+ self._util.signal_progress(f"Downloading model {source}")
return self._services.model_manager.install.download_and_cache_model(source=source)
def load_local_model(
@@ -489,6 +508,8 @@ def load_local_model(
Returns:
A LoadedModelWithoutConfig object.
"""
+
+ self._util.signal_progress(f"Loading model {model_path.name}")
return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)
def load_remote_model(
@@ -514,6 +535,8 @@ def load_remote_model(
A LoadedModelWithoutConfig object.
"""
model_path = self._services.model_manager.install.download_and_cache_model(source=str(source))
+
+ self._util.signal_progress(f"Loading model {source}")
return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)
@@ -707,12 +730,12 @@ def build_invocation_context(
"""
logger = LoggerInterface(services=services, data=data)
- images = ImagesInterface(services=services, data=data)
tensors = TensorsInterface(services=services, data=data)
- models = ModelsInterface(services=services, data=data)
config = ConfigInterface(services=services, data=data)
util = UtilInterface(services=services, data=data, is_canceled=is_canceled)
conditioning = ConditioningInterface(services=services, data=data)
+ models = ModelsInterface(services=services, data=data, util=util)
+ images = ImagesInterface(services=services, data=data, util=util)
boards = BoardsInterface(services=services, data=data)
ctx = InvocationContext(
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index d5ad0cf8f9a..55be4a8dcde 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -174,7 +174,8 @@
"placeholderSelectAModel": "Select a model",
"reset": "Reset",
"none": "None",
- "new": "New"
+ "new": "New",
+ "generating": "Generating"
},
"hrf": {
"hrf": "High Resolution Fix",
@@ -1139,6 +1140,7 @@
"resetWebUI": "Reset Web UI",
"resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
"resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",
+ "showDetailedInvocationProgress": "Show Progress Details",
"showProgressInViewer": "Show Progress Images in Viewer",
"ui": "User Interface",
"clearIntermediatesDisabled": "Queue must be empty to clear intermediates",
diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts
index bee61eb49aa..8ec968e2698 100644
--- a/invokeai/frontend/web/src/app/types/invokeai.ts
+++ b/invokeai/frontend/web/src/app/types/invokeai.ts
@@ -25,7 +25,8 @@ export type AppFeature =
| 'invocationCache'
| 'bulkDownload'
| 'starterModels'
- | 'hfToken';
+ | 'hfToken'
+ | 'invocationProgressAlert';
/**
* A disable-able Stable Diffusion feature
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx b/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx
new file mode 100644
index 00000000000..3368b457138
--- /dev/null
+++ b/invokeai/frontend/web/src/features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress.tsx
@@ -0,0 +1,45 @@
+import { Alert, AlertDescription, AlertIcon, AlertTitle } from '@invoke-ai/ui-library';
+import { useStore } from '@nanostores/react';
+import { useAppSelector } from 'app/store/storeHooks';
+import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
+import { selectSystemShouldShowInvocationProgressDetail } from 'features/system/store/systemSlice';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { $invocationProgressMessage } from 'services/events/stores';
+
+const CanvasAlertsInvocationProgressContent = memo(() => {
+ const { t } = useTranslation();
+ const invocationProgressMessage = useStore($invocationProgressMessage);
+
+ if (!invocationProgressMessage) {
+ return null;
+ }
+
+ return (
+   <Alert status="loading">
+     <AlertIcon />
+     <AlertTitle>{t('common.generating')}</AlertTitle>
+     <AlertDescription>{invocationProgressMessage}</AlertDescription>
+   </Alert>
+ );
+});
+CanvasAlertsInvocationProgressContent.displayName = 'CanvasAlertsInvocationProgressContent';
+
+export const CanvasAlertsInvocationProgress = memo(() => {
+ const isProgressMessageAlertEnabled = useFeatureStatus('invocationProgressAlert');
+ const shouldShowInvocationProgressDetail = useAppSelector(selectSystemShouldShowInvocationProgressDetail);
+
+ // The alert is disabled at the system level
+ if (!isProgressMessageAlertEnabled) {
+ return null;
+ }
+
+ // The alert is disabled at the user level
+ if (!shouldShowInvocationProgressDetail) {
+ return null;
+ }
+
+ return <CanvasAlertsInvocationProgressContent />;
+});
+
+CanvasAlertsInvocationProgress.displayName = 'CanvasAlertsInvocationProgress';
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/CanvasMainPanelContent.tsx b/invokeai/frontend/web/src/features/controlLayers/components/CanvasMainPanelContent.tsx
index 91931980659..18fd8d983db 100644
--- a/invokeai/frontend/web/src/features/controlLayers/components/CanvasMainPanelContent.tsx
+++ b/invokeai/frontend/web/src/features/controlLayers/components/CanvasMainPanelContent.tsx
@@ -21,6 +21,8 @@ import { GatedImageViewer } from 'features/gallery/components/ImageViewer/ImageV
import { memo, useCallback, useRef } from 'react';
import { PiDotsThreeOutlineVerticalFill } from 'react-icons/pi';
+import { CanvasAlertsInvocationProgress } from './CanvasAlerts/CanvasAlertsInvocationProgress';
+
const MenuContent = () => {
return (
@@ -84,6 +86,7 @@ export const CanvasMainPanelContent = memo(() => {
+ <CanvasAlertsInvocationProgress />
{shouldShowImageDetails && imageDTO && (
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx
index a173a95c428..1a33215d54c 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/ImageViewer.tsx
@@ -44,7 +44,6 @@ export const ImageViewer = memo(({ closeButton }: Props) => {
right={0}
bottom={0}
left={0}
- rowGap={2}
alignItems="center"
justifyContent="center"
>
diff --git a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx
index 00e28630065..3117a39a8e9 100644
--- a/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx
+++ b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx
@@ -26,17 +26,20 @@ import { SettingsDeveloperLogLevel } from 'features/system/components/SettingsMo
import { SettingsDeveloperLogNamespaces } from 'features/system/components/SettingsModal/SettingsDeveloperLogNamespaces';
import { useClearIntermediates } from 'features/system/components/SettingsModal/useClearIntermediates';
import { StickyScrollable } from 'features/system/components/StickyScrollable';
+import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import {
selectSystemShouldAntialiasProgressImage,
selectSystemShouldConfirmOnDelete,
selectSystemShouldConfirmOnNewSession,
selectSystemShouldEnableInformationalPopovers,
selectSystemShouldEnableModelDescriptions,
+ selectSystemShouldShowInvocationProgressDetail,
selectSystemShouldUseNSFWChecker,
selectSystemShouldUseWatermarker,
setShouldConfirmOnDelete,
setShouldEnableInformationalPopovers,
setShouldEnableModelDescriptions,
+ setShouldShowInvocationProgressDetail,
shouldAntialiasProgressImageChanged,
shouldConfirmOnNewSessionToggled,
shouldUseNSFWCheckerChanged,
@@ -103,6 +106,8 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps)
const shouldEnableInformationalPopovers = useAppSelector(selectSystemShouldEnableInformationalPopovers);
const shouldEnableModelDescriptions = useAppSelector(selectSystemShouldEnableModelDescriptions);
const shouldConfirmOnNewSession = useAppSelector(selectSystemShouldConfirmOnNewSession);
+ const shouldShowInvocationProgressDetail = useAppSelector(selectSystemShouldShowInvocationProgressDetail);
+ const isInvocationProgressAlertEnabled = useFeatureStatus('invocationProgressAlert');
const onToggleConfirmOnNewSession = useCallback(() => {
dispatch(shouldConfirmOnNewSessionToggled());
}, [dispatch]);
@@ -170,6 +175,13 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps)
[dispatch]
);
+ const handleChangeShouldShowInvocationProgressDetail = useCallback(
+ (e: ChangeEvent<HTMLInputElement>) => {
+ dispatch(setShouldShowInvocationProgressDetail(e.target.checked));
+ },
+ [dispatch]
+ );
+
return (
<>
{cloneElement(children, {
@@ -221,6 +233,15 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps)
onChange={handleChangeShouldAntialiasProgressImage}
/>
+ {isInvocationProgressAlertEnabled && (
+   <FormControl>
+     <FormLabel>{t('settings.showDetailedInvocationProgress')}</FormLabel>
+     <Switch
+       isChecked={shouldShowInvocationProgressDetail}
+       onChange={handleChangeShouldShowInvocationProgressDetail}
+     />
+   </FormControl>
+ )}
<FormLabel>{t('parameters.useCpuNoise')}</FormLabel>
diff --git a/invokeai/frontend/web/src/features/system/store/systemSlice.ts b/invokeai/frontend/web/src/features/system/store/systemSlice.ts
index e45308bcdaf..dbf23ee88b9 100644
--- a/invokeai/frontend/web/src/features/system/store/systemSlice.ts
+++ b/invokeai/frontend/web/src/features/system/store/systemSlice.ts
@@ -21,6 +21,7 @@ const initialSystemState: SystemState = {
logIsEnabled: true,
logLevel: 'debug',
logNamespaces: [...zLogNamespace.options],
+ shouldShowInvocationProgressDetail: false,
};
export const systemSlice = createSlice({
@@ -64,6 +65,9 @@ export const systemSlice = createSlice({
shouldConfirmOnNewSessionToggled(state) {
state.shouldConfirmOnNewSession = !state.shouldConfirmOnNewSession;
},
+ setShouldShowInvocationProgressDetail(state, action: PayloadAction<boolean>) {
+ state.shouldShowInvocationProgressDetail = action.payload;
+ },
},
});
@@ -79,6 +83,7 @@ export const {
setShouldEnableInformationalPopovers,
setShouldEnableModelDescriptions,
shouldConfirmOnNewSessionToggled,
+ setShouldShowInvocationProgressDetail,
} = systemSlice.actions;
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
@@ -117,3 +122,6 @@ export const selectSystemShouldEnableModelDescriptions = createSystemSelector(
(system) => system.shouldEnableModelDescriptions
);
export const selectSystemShouldConfirmOnNewSession = createSystemSelector((system) => system.shouldConfirmOnNewSession);
+export const selectSystemShouldShowInvocationProgressDetail = createSystemSelector(
+ (system) => system.shouldShowInvocationProgressDetail
+);
diff --git a/invokeai/frontend/web/src/features/system/store/types.ts b/invokeai/frontend/web/src/features/system/store/types.ts
index 1bbe7095367..10207eb3211 100644
--- a/invokeai/frontend/web/src/features/system/store/types.ts
+++ b/invokeai/frontend/web/src/features/system/store/types.ts
@@ -41,4 +41,5 @@ export interface SystemState {
logIsEnabled: boolean;
logLevel: LogLevel;
logNamespaces: LogNamespace[];
+ shouldShowInvocationProgressDetail: boolean;
}
diff --git a/invokeai/frontend/web/src/services/events/stores.ts b/invokeai/frontend/web/src/services/events/stores.ts
index 043fced8024..473a7292ac9 100644
--- a/invokeai/frontend/web/src/services/events/stores.ts
+++ b/invokeai/frontend/web/src/services/events/stores.ts
@@ -1,3 +1,4 @@
+import { round } from 'lodash-es';
import { atom, computed, map } from 'nanostores';
import type { S } from 'services/api/types';
import type { AppSocket } from 'services/events/types';
@@ -10,3 +11,16 @@ export const $lastProgressEvent = atom<S['InvocationProgressEvent'] | null>(null
export const $progressImage = computed($lastProgressEvent, (val) => val?.image ?? null);
export const $hasProgressImage = computed($lastProgressEvent, (val) => Boolean(val?.image));
export const $isProgressFromCanvas = computed($lastProgressEvent, (val) => val?.destination === 'canvas');
+export const $invocationProgressMessage = computed($lastProgressEvent, (val) => {
+ if (!val) {
+ return null;
+ }
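+ // Only surface progress for canvas-destined invocations.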
+ if (val.destination !== 'canvas') {
+ return null;
+ }
+ let message = val.message;
+ if (val.percentage) {
+ message += ` (${round(val.percentage * 100)}%)`;
+ }
+ return message;
+});
diff --git a/tests/app/services/model_load/test_load_api.py b/tests/app/services/model_load/test_load_api.py
index 6f2c7bd931b..c0760cd3cad 100644
--- a/tests/app/services/model_load/test_load_api.py
+++ b/tests/app/services/model_load/test_load_api.py
@@ -6,7 +6,11 @@
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.model_manager import ModelManagerServiceBase
-from invokeai.app.services.shared.invocation_context import InvocationContext, build_invocation_context
+from invokeai.app.services.shared.invocation_context import (
+ InvocationContext,
+ InvocationContextData,
+ build_invocation_context,
+)
from invokeai.backend.model_manager.load.load_base import LoadedModelWithoutConfig
from tests.backend.model_manager.model_manager_fixtures import * # noqa F403
@@ -19,7 +23,7 @@ def mock_context(
mock_services.model_manager = mm2_model_manager
return build_invocation_context(
services=mock_services,
- data=None, # type: ignore
+ data=InvocationContextData(queue_item=None, invocation=None, source_invocation_id=None), # type: ignore
is_canceled=None, # type: ignore
)
diff --git a/tests/test_nodes.py b/tests/test_nodes.py
index 2d413a2687f..ad354fde352 100644
--- a/tests/test_nodes.py
+++ b/tests/test_nodes.py
@@ -9,6 +9,8 @@
from invokeai.app.invocations.fields import InputField, OutputField
from invokeai.app.invocations.image import ImageField
from invokeai.app.services.events.events_common import EventBase
+from invokeai.app.services.session_processor.session_processor_common import ProgressImage
+from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem
from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -133,6 +135,16 @@ def dispatch(self, event: EventBase) -> None:
self.events.append(event)
pass
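+ # No-op stub so the test event service implements the new invocation progress event hook.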
+ def emit_invocation_progress(
+ self,
+ queue_item: "SessionQueueItem",
+ invocation: "BaseInvocation",
+ message: str,
+ percentage: float | None = None,
+ image: "ProgressImage | None" = None,
+ ) -> None:
+ pass
+
def wait_until(condition: Callable[[], bool], timeout: int = 10, interval: float = 0.1) -> None:
import time