Skip to content

cuDNN Frontend error: s_kv not a multiple of 64 with dropout enabled is not supported with cudnn version below 9.0.0 #5

@FranM2030

Description

@FranM2030

ComfyUI Error Report

Error Details

  • Node Type: RdancerFlorence2SAM2GenerateMask
  • Exception Type: RuntimeError
  • Exception Message: cuDNN Frontend error: s_kv not a multiple of 64 with dropout enabled is not supported with cudnn version below 9.0.0

Stack Trace

  File "D:\sync\ComfyUI_windows_portable\ComfyUI\execution.py", line 323, in execute
    output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
                                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\ComfyUI\execution.py", line 198, in get_output_data
    return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\ComfyUI\execution.py", line 169, in _map_node_over_list
    process_inputs(input_dict, i)

  File "D:\sync\ComfyUI_windows_portable\ComfyUI\execution.py", line 158, in process_inputs
    results.append(getattr(obj, func)(**inputs))
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Florence2SAM2\__init__.py", line 83, in _process_image
    annotated_image, mask, masked_image = process_image(torch_device, sam2_model, img, prompt, keep_model_loaded)
                                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Florence2SAM2\app.py", line 184, in process_image
    annotated_image, mask_list = _process_image(IMAGE_OPEN_VOCABULARY_DETECTION_MODE, image, promt)
                                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\amp\autocast_mode.py", line 16, in decorate_autocast
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Florence2SAM2\app.py", line 230, in _process_image
    _, result = run_florence_inference(
                ^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Florence2SAM2\utils\florence.py", line 51, in run_florence_inference
    generated_ids = model.generate(
                    ^^^^^^^^^^^^^^^

  File "C:\Users\peter\.cache\huggingface\modules\transformers_modules\microsoft\Florence-2-base\ceaf371f01ef66192264811b390bccad475a4f02\modeling_florence2.py", line 2797, in generate
    return self.language_model.generate(
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 2027, in generate
    model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 635, in _prepare_encoder_decoder_kwargs_for_generation
    model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs)  # type: ignore
                                                   ^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "C:\Users\peter\.cache\huggingface\modules\transformers_modules\microsoft\Florence-2-base\ceaf371f01ef66192264811b390bccad475a4f02\modeling_florence2.py", line 1632, in forward
    layer_outputs = encoder_layer(
                    ^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "C:\Users\peter\.cache\huggingface\modules\transformers_modules\microsoft\Florence-2-base\ceaf371f01ef66192264811b390bccad475a4f02\modeling_florence2.py", line 1281, in forward
    hidden_states, attn_weights, _ = self.self_attn(
                                     ^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

  File "C:\Users\peter\.cache\huggingface\modules\transformers_modules\microsoft\Florence-2-base\ceaf371f01ef66192264811b390bccad475a4f02\modeling_florence2.py", line 1210, in forward
    attn_output = torch.nn.functional.scaled_dot_product_attention(
                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

System Information

  • ComfyUI Version: v0.2.7-9-g6ee066a
  • Arguments: ComfyUI\main.py --windows-standalone-build --use-quad-cross-attention --fp8_e4m3fn-text-enc --normalvram --dont-upcast-attention
  • OS: nt
  • Python Version: 3.11.9 (tags/v3.11.9:de54cf5, Apr 2 2024, 10:12:12) [MSC v.1938 64 bit (AMD64)]
  • Embedded Python: true
  • PyTorch Version: 2.3.1+cu121

Devices

  • Name: cuda:0 NVIDIA GeForce RTX 4090 : cudaMallocAsync
    • Type: cuda
    • VRAM Total: 25756696576
    • VRAM Free: 22495843580
    • Torch VRAM Total: 1509949440
    • Torch VRAM Free: 47928572

Logs



2024-11-14T15:39:49.919504 - To see the GUI go to: http://127.0.0.1:8188
2024-11-14T15:39:52.126021 - FETCH DATA from: D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Manager\extension-node-map.json2024-11-14T15:39:52.126021 - 2024-11-14T15:39:52.129294 -  [DONE]2024-11-14T15:39:52.129294 - 
2024-11-14T15:39:52.609029 - Defining INPUT_TYPES2024-11-14T15:39:52.609029 - 
2024-11-14T15:39:52.609029 - Defining INPUT_TYPES2024-11-14T15:39:52.609029 - 
2024-11-14T15:39:53.012867 - �[36;20m[comfy_mtb] | INFO -> Found multiple match, we will pick the last D:\sync\ComfyUI_windows_portable\ComfyUI\models\upscale_models
['D:\\sync\\ComfyUI_windows_portable\\ComfyUI\\models\\upscale_models', 'D:\\sync\\ComfyUI_windows_portable\\ComfyUI\\models\\upscale_models']�[0m
2024-11-14T15:39:54.298604 - []2024-11-14T15:39:54.298604 - 
2024-11-14T15:39:54.298604 - []2024-11-14T15:39:54.298604 - 
2024-11-14T15:40:02.612477 - FETCH DATA from: D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Manager\extension-node-map.json2024-11-14T15:40:02.613477 - 2024-11-14T15:40:02.615477 -  [DONE]2024-11-14T15:40:02.615477 - 
2024-11-14T15:40:02.851594 - Defining INPUT_TYPES2024-11-14T15:40:02.851594 - 
2024-11-14T15:40:02.851594 - Defining INPUT_TYPES2024-11-14T15:40:02.851594 - 
2024-11-14T15:40:04.480059 - []2024-11-14T15:40:04.480059 - 
2024-11-14T15:40:04.480059 - []2024-11-14T15:40:04.480059 - 
2024-11-14T15:40:07.086150 - got prompt
2024-11-14T15:40:07.127777 - torch version: 2.3.1+cu1212024-11-14T15:40:07.127777 - 
2024-11-14T15:40:07.127777 - torch CUDA available: True2024-11-14T15:40:07.127777 - 
2024-11-14T15:40:07.127777 - disabling gradients and optimisations2024-11-14T15:40:07.127777 - 
2024-11-14T15:40:07.127777 - CuDNN SDPA was already enabled.2024-11-14T15:40:07.127777 - 
2024-11-14T15:40:07.136867 - Current working directory: D:\sync\ComfyUI_windows_portable2024-11-14T15:40:07.136867 - 
2024-11-14T15:40:07.137868 - 'models' directory exists: D:\sync\ComfyUI_windows_portable\ComfyUI\models2024-11-14T15:40:07.137868 - 
2024-11-14T15:40:07.137868 - 'models/sam2' directory exists: D:\sync\ComfyUI_windows_portable\ComfyUI\models\sam22024-11-14T15:40:07.137868 - 
2024-11-14T15:40:07.137868 - 'models/sam2/sam2_hiera_base_plus.pt' file exists: D:\sync\ComfyUI_windows_portable\ComfyUI\models\sam2\sam2_hiera_base_plus.pt2024-11-14T15:40:07.137868 - 
2024-11-14T15:40:07.581967 - Loaded checkpoint sucessfully
2024-11-14T15:40:07.659679 - torch.jit.script() failed: could not get source code -- proceeding without JIT2024-11-14T15:40:07.659679 - 
2024-11-14T15:40:13.195634 - !!! Exception during processing !!! cuDNN Frontend error: s_kv not a multiple of 64 with dropout enabled is not supported with cudnn version below 9.0.0
2024-11-14T15:40:13.198720 - Traceback (most recent call last):
  File "D:\sync\ComfyUI_windows_portable\ComfyUI\execution.py", line 323, in execute
    output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
                                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\ComfyUI\execution.py", line 198, in get_output_data
    return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\ComfyUI\execution.py", line 169, in _map_node_over_list
    process_inputs(input_dict, i)
  File "D:\sync\ComfyUI_windows_portable\ComfyUI\execution.py", line 158, in process_inputs
    results.append(getattr(obj, func)(**inputs))
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Florence2SAM2\__init__.py", line 83, in _process_image
    annotated_image, mask, masked_image = process_image(torch_device, sam2_model, img, prompt, keep_model_loaded)
                                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Florence2SAM2\app.py", line 184, in process_image
    annotated_image, mask_list = _process_image(IMAGE_OPEN_VOCABULARY_DETECTION_MODE, image, promt)
                                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\amp\autocast_mode.py", line 16, in decorate_autocast
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Florence2SAM2\app.py", line 230, in _process_image
    _, result = run_florence_inference(
                ^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Florence2SAM2\utils\florence.py", line 51, in run_florence_inference
    generated_ids = model.generate(
                    ^^^^^^^^^^^^^^^
  File "C:\Users\peter\.cache\huggingface\modules\transformers_modules\microsoft\Florence-2-base\ceaf371f01ef66192264811b390bccad475a4f02\modeling_florence2.py", line 2797, in generate
    return self.language_model.generate(
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 2027, in generate
    model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 635, in _prepare_encoder_decoder_kwargs_for_generation
    model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs)  # type: ignore
                                                   ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\peter\.cache\huggingface\modules\transformers_modules\microsoft\Florence-2-base\ceaf371f01ef66192264811b390bccad475a4f02\modeling_florence2.py", line 1632, in forward
    layer_outputs = encoder_layer(
                    ^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\peter\.cache\huggingface\modules\transformers_modules\microsoft\Florence-2-base\ceaf371f01ef66192264811b390bccad475a4f02\modeling_florence2.py", line 1281, in forward
    hidden_states, attn_weights, _ = self.self_attn(
                                     ^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\sync\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\peter\.cache\huggingface\modules\transformers_modules\microsoft\Florence-2-base\ceaf371f01ef66192264811b390bccad475a4f02\modeling_florence2.py", line 1210, in forward
    attn_output = torch.nn.functional.scaled_dot_product_attention(
                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
RuntimeError: cuDNN Frontend error: s_kv not a multiple of 64 with dropout enabled is not supported with cudnn version below 9.0.0

2024-11-14T15:40:13.199720 - Prompt executed in 6.10 seconds

Attached Workflow

Please make sure that workflow does not contain any sensitive information such as API keys or passwords.

{"last_node_id":6,"last_link_id":5,"nodes":[{"id":3,"type":"PreviewImage","pos":{"0":2495.822265625,"1":-226.65533447265625},"size":{"0":210,"1":26},"flags":{},"order":2,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":2}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":5,"type":"PreviewImage","pos":{"0":2862.855712890625,"1":7.2780442237854},"size":{"0":210,"1":26},"flags":{},"order":5,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":4}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":6,"type":"PreviewImage","pos":{"0":2448.22900390625,"1":245.24465942382812},"size":{"0":210,"1":26},"flags":{},"order":4,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":5}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":4,"type":"MaskToImage","pos":{"0":2442,"1":-35},"size":{"0":264.5999755859375,"1":26},"flags":{},"order":3,"mode":0,"inputs":[{"name":"mask","type":"MASK","link":3}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[4],"slot_index":0}],"properties":{"Node name for S&R":"MaskToImage"},"widgets_values":[]},{"id":1,"type":"RdancerFlorence2SAM2GenerateMask","pos":{"0":1630,"1":-72},"size":{"0":403.1999816894531,"1":170},"flags":{},"order":1,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":1}],"outputs":[{"name":"annotated_image","type":"IMAGE","links":[2],"slot_index":0},{"name":"mask","type":"MASK","links":[3],"slot_index":1},{"name":"masked_image","type":"IMAGE","links":[5],"slot_index":2}],"properties":{"Node name for S&R":"RdancerFlorence2SAM2GenerateMask"},"widgets_values":["sam2_hiera_base_plus.pt","cuda","face",false]},{"id":2,"type":"LoadImage","pos":{"0":861,"1":-447},"size":{"0":491.7756652832031,"1":813.2446899414062},"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[1],"slot_index":0},{"name":"MASK","type":"MASK","links":null}],"properties":{"Node name for 
S&R":"LoadImage"},"widgets_values":["4A8QDEWKBKMHFW9VMHA4DXDVK0.jpg","image"]}],"links":[[1,2,0,1,0,"IMAGE"],[2,1,0,3,0,"IMAGE"],[3,1,1,4,0,"MASK"],[4,4,0,5,0,"IMAGE"],[5,1,2,6,0,"IMAGE"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.8264462809917354,"offset":[-535.6222758367315,576.4713677398681]}},"version":0.4}

Additional Context

Not sure what the issue is — I have cuDNN 9.5.1 installed and the latest PyTorch, etc.

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions