You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
No operator found for memory_efficient_attention_forward with inputs:
query : shape=(1, 4624, 1, 512) (torch.bfloat16)
key : shape=(1, 4624, 1, 512) (torch.bfloat16)
value : shape=(1, 4624, 1, 512) (torch.bfloat16)
attn_bias :
p : 0.0 decoderF is not supported because:
max(query.shape[-1] != value.shape[-1]) > 128
xFormers wasn't build with CUDA support
attn_bias type is <class 'NoneType'>
`flshattF@<version>` is not supported because:
max(query.shape[-1] != value.shape[-1]) > 256
xFormers wasn't build with CUDA support
operator wasn't built - see python -m xformers.info for more info tritonflashattF is not supported because:
max(query.shape[-1] != value.shape[-1]) > 128
xFormers wasn't build with CUDA support
operator wasn't built - see python -m xformers.info for more info
triton is not available cutlassF is not supported because:
xFormers wasn't build with CUDA support smallkF is not supported because:
max(query.shape[-1] != value.shape[-1]) > 32
xFormers wasn't build with CUDA support
dtype=torch.bfloat16 (supported: {torch.float32})
unsupported embed per head: 512
File "E:\test\UPRES\ComfyUI\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\nodes.py", line 336, in process
samples = self.model.batchify_sample(imgs, caps, num_steps=steps,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\models\SUPIR_model.py", line 121, in batchify_sample
_z = self.encode_first_stage_with_denoise(x, use_sample=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\models\SUPIR_model.py", line 48, in encode_first_stage_with_denoise
h = self.first_stage_model.denoise_encoder(x)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\nn\modules\module.py", line 1520, in call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 704, in call
return self.vae_tile_forward(x)
^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 587, in wrapper
ret = fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 937, in vae_tile_forward
tile = task1
^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 372, in
('attn', lambda x, net=net: xformer_attn_forward(net, x)))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 340, in xformer_attn_forward
out = xformers.ops.memory_efficient_attention(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha_init.py", line 223, in memory_efficient_attention
return memory_efficient_attention(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha_init.py", line 321, in _memory_efficient_attention
return memory_efficient_attention_forward(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha_init.py", line 337, in _memory_efficient_attention_forward
op = _dispatch_fw(inp, False)
^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha\dispatch.py", line 120, in _dispatch_fw
return _run_priority_list(
^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha\dispatch.py", line 63, in _run_priority_list
raise NotImplementedError(msg)
Environment: Windows 11 23H2
The text was updated successfully, but these errors were encountered:
No operator found for
memory_efficient_attention_forward
with inputs:query : shape=(1, 4624, 1, 512) (torch.bfloat16)
key : shape=(1, 4624, 1, 512) (torch.bfloat16)
value : shape=(1, 4624, 1, 512) (torch.bfloat16)
attn_bias :
p : 0.0
decoderF
is not supported because:max(query.shape[-1] != value.shape[-1]) > 128
xFormers wasn't build with CUDA support
attn_bias type is <class 'NoneType'>
`flshattF@<version>`
is not supported because:max(query.shape[-1] != value.shape[-1]) > 256
xFormers wasn't build with CUDA support
operator wasn't built - see
python -m xformers.info
for more infotritonflashattF
is not supported because:max(query.shape[-1] != value.shape[-1]) > 128
xFormers wasn't build with CUDA support
operator wasn't built - see
python -m xformers.info
for more infotriton is not available
cutlassF
is not supported because:xFormers wasn't build with CUDA support
smallkF
is not supported because:max(query.shape[-1] != value.shape[-1]) > 32
xFormers wasn't build with CUDA support
dtype=torch.bfloat16 (supported: {torch.float32})
unsupported embed per head: 512
File "E:\test\UPRES\ComfyUI\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\nodes.py", line 336, in process
samples = self.model.batchify_sample(imgs, caps, num_steps=steps,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\models\SUPIR_model.py", line 121, in batchify_sample
_z = self.encode_first_stage_with_denoise(x, use_sample=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\models\SUPIR_model.py", line 48, in encode_first_stage_with_denoise
h = self.first_stage_model.denoise_encoder(x)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\nn\modules\module.py", line 1520, in call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 704, in call
return self.vae_tile_forward(x)
^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 587, in wrapper
ret = fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 937, in vae_tile_forward
tile = task1
^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 372, in
('attn', lambda x, net=net: xformer_attn_forward(net, x)))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\test\UPRES\ComfyUI\custom_nodes\ComfyUI-SUPIR\SUPIR\utils\tilevae.py", line 340, in xformer_attn_forward
out = xformers.ops.memory_efficient_attention(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha_init.py", line 223, in memory_efficient_attention
return memory_efficient_attention(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha_init.py", line 321, in _memory_efficient_attention
return memory_efficient_attention_forward(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha_init.py", line 337, in _memory_efficient_attention_forward
op = _dispatch_fw(inp, False)
^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha\dispatch.py", line 120, in _dispatch_fw
return _run_priority_list(
^^^^^^^^^^^^^^^^^^^
File "C:\Users\vovai\miniconda3\Lib\site-packages\xformers\ops\fmha\dispatch.py", line 63, in _run_priority_list
raise NotImplementedError(msg)
Environment: Windows 11 23H2
The text was updated successfully, but these errors were encountered: