Fix model_management import so it doesn't get executed twice.
comfyanonymous committed Apr 15, 2023
1 parent 81d1f00 commit 73c3e11
Showing 7 changed files with 14 additions and 14 deletions.
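
Background: ComfyUI adds the comfy/ directory to sys.path at startup, so model_management.py was importable both as a top-level module ("model_management") and as a package submodule ("comfy.model_management"). Python caches modules in sys.modules by name, so the two spellings create two separate module objects and the file's module-level setup code runs twice. A minimal sketch of the failure mode, using an illustrative layout rather than ComfyUI's exact startup code:

import sys
sys.path.append("comfy")  # comfy/'s files become importable as top-level modules

import model_management                     # runs model_management.py; cached as "model_management"
from comfy import model_management as mm2   # runs it AGAIN; cached as "comfy.model_management"

# Two distinct module objects now exist, so state set through one name
# is invisible through the other:
print(sys.modules["model_management"] is sys.modules["comfy.model_management"])  # False

The fix below switches every import to the package-qualified form so only one cached module ever exists.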
comfy/ldm/modules/attention.py (1 addition, 1 deletion)
@@ -9,7 +9,7 @@
 from ldm.modules.diffusionmodules.util import checkpoint
 from .sub_quadratic_attention import efficient_dot_product_attention

-import model_management
+from comfy import model_management

 from . import tomesd
comfy/ldm/modules/diffusionmodules/model.py (1 addition, 1 deletion)
@@ -7,7 +7,7 @@
 from typing import Optional, Any

 from ldm.modules.attention import MemoryEfficientCrossAttention
-import model_management
+from comfy import model_management

 if model_management.xformers_enabled_vae():
     import xformers
comfy/ldm/modules/sub_quadratic_attention.py (1 addition, 1 deletion)
@@ -24,7 +24,7 @@
 from torch import Tensor
 from typing import List

-import model_management
+from comfy import model_management

 def dynamic_slice(
     x: Tensor,
comfy/samplers.py (1 addition, 1 deletion)
@@ -3,7 +3,7 @@
 from .extra_samplers import uni_pc
 import torch
 import contextlib
-import model_management
+from comfy import model_management
 from .ldm.models.diffusion.ddim import DDIMSampler
 from .ldm.modules.diffusionmodules.util import make_ddim_timesteps

comfy/sd.py (2 additions, 2 deletions)
@@ -4,7 +4,7 @@

 import sd1_clip
 import sd2_clip
-import model_management
+from comfy import model_management
 from .ldm.util import instantiate_from_config
 from .ldm.models.autoencoder import AutoencoderKL
 import yaml
@@ -388,7 +388,7 @@ def encode_from_tokens(self, tokens):
         return cond

     def encode(self, text):
-        tokens = self.tokenizer.tokenize_with_weights(text)
+        tokens = self.tokenize(text)
         return self.encode_from_tokens(tokens)

 class VAE:
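
Aside from the import change, the second hunk in comfy/sd.py is a small related cleanup: encode() now goes through the class's own tokenize() wrapper instead of calling self.tokenizer.tokenize_with_weights() directly, which presumably keeps any logic added to tokenize() applied consistently wherever text is encoded.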
comfy_extras/nodes_upscale_model.py (1 addition, 1 deletion)
@@ -1,6 +1,6 @@
 import os
 from comfy_extras.chainner_models import model_loading
-import model_management
+from comfy import model_management
 import torch
 import comfy.utils
 import folder_paths
nodes.py (7 additions, 7 deletions)
@@ -21,16 +21,16 @@

 import comfy.clip_vision

-import model_management
+import comfy.model_management
 import importlib

 import folder_paths

 def before_node_execution():
-    model_management.throw_exception_if_processing_interrupted()
+    comfy.model_management.throw_exception_if_processing_interrupted()

 def interrupt_processing(value=True):
-    model_management.interrupt_current_processing(value)
+    comfy.model_management.interrupt_current_processing(value)

 MAX_RESOLUTION=8192

@@ -241,7 +241,7 @@ def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
                     model_path = os.path.join(search_path, model_path)
                     break

-        return comfy.diffusers_convert.load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
+        return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


 class unCLIPCheckpointLoader:
@@ -680,7 +680,7 @@ def set_mask(self, samples, mask):
 def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
     latent_image = latent["samples"]
     noise_mask = None
-    device = model_management.get_torch_device()
+    device = comfy.model_management.get_torch_device()

     if disable_noise:
         noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
@@ -696,7 +696,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
         noise_mask = noise_mask.to(device)

     real_model = None
-    model_management.load_model_gpu(model)
+    comfy.model_management.load_model_gpu(model)
     real_model = model.model

     noise = noise.to(device)
@@ -726,7 +726,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
     control_net_models = []
     for x in control_nets:
         control_net_models += x.get_control_models()
-    model_management.load_controlnet_gpu(control_net_models)
+    comfy.model_management.load_controlnet_gpu(control_net_models)

     if sampler_name in comfy.samplers.KSampler.SAMPLERS:
         sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
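
Both spellings introduced by this commit, import comfy.model_management (used in the top-level nodes.py) and from comfy import model_management (used inside the comfy package), resolve to the same cached sys.modules entry, so the module now initializes exactly once. A quick check of that equivalence, assuming it is run from a ComfyUI checkout:

import sys
import comfy.model_management        # form used in nodes.py
from comfy import model_management   # form used inside the comfy package

# Both names are bound to the single cached module object:
print(model_management is comfy.model_management)                 # True
print(model_management is sys.modules["comfy.model_management"])  # True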
