From 9738b84a08957eb828669e8af27337ee722e8fdc Mon Sep 17 00:00:00 2001
From: Antoni Baum
Date: Wed, 1 Nov 2023 16:24:32 -0700
Subject: [PATCH] Force paged attention v2 for long contexts (#1510)

---
 vllm/model_executor/layers/attention.py |  4 +++-
 vllm/worker/worker.py                   | 29 +------------------------
 2 files changed, 4 insertions(+), 29 deletions(-)

diff --git a/vllm/model_executor/layers/attention.py b/vllm/model_executor/layers/attention.py
index 58f868d407bf7..7aa01ffe14bab 100644
--- a/vllm/model_executor/layers/attention.py
+++ b/vllm/model_executor/layers/attention.py
@@ -156,7 +156,9 @@ def single_query_cached_kv_attention(
         # sequences or heads is large, we use V1 since there is enough work
         # to parallelize.
         # TODO(woosuk): Tune this heuristic.
-        use_v1 = max_num_partitions == 1 or num_seqs * num_heads > 512
+        # For context len > 8192, use V2 kernel to avoid shared memory shortage.
+        use_v1 = input_metadata.max_context_len <= 8192 and (
+            max_num_partitions == 1 or num_seqs * num_heads > 512)
         if use_v1:
             # Run PagedAttention V1.
             attention_ops.paged_attention_v1(
diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py
index d598a86cf0c1c..b2391ae788a84 100644
--- a/vllm/worker/worker.py
+++ b/vllm/worker/worker.py
@@ -13,7 +13,7 @@
 from vllm.sampling_params import SamplingParams, SamplingType
 from vllm.sequence import SamplerOutput, SequenceData, SequenceGroupMetadata
 from vllm.worker.cache_engine import CacheEngine
-from vllm.utils import get_gpu_memory, get_max_shared_memory_bytes
+from vllm.utils import get_gpu_memory
 
 
 class Worker:
@@ -141,13 +141,6 @@ def init_cache_engine(self, cache_config: CacheConfig) -> None:
         self.block_size = cache_config.block_size
         self.sliding_window = cache_config.sliding_window
 
-        if self.sliding_window is None:
-            max_seq_len = self.scheduler_config.max_model_len
-        else:
-            max_seq_len = min(self.scheduler_config.max_model_len,
-                              self.sliding_window)
-        _check_if_can_support_max_seq_len(max_seq_len, self.block_size)
-
         self.cache_engine = CacheEngine(self.cache_config, self.model_config,
                                         self.parallel_config)
         self.cache_events = self.cache_engine.events
@@ -421,26 +414,6 @@ def _pad_to_max(x: List[int], max_len: int, pad: int) -> List[int]:
     return x + [pad] * (max_len - len(x))
 
 
-def _check_if_can_support_max_seq_len(max_seq_len: int,
-                                      block_size: int) -> None:
-    # Follows the logic in
-    # attention_kernels.cu::single_query_cached_kv_attention_launcher
-    max_shared_mem = get_max_shared_memory_bytes()
-    float32_bytes = torch.finfo(torch.float).bits // 8
-    padded_max_seq_len = (
-        (max_seq_len + block_size - 1) / block_size) * block_size
-    # padded_max_seq_len + extra buffer
-    required_shared_mem = (padded_max_seq_len + 512) * float32_bytes
-    if padded_max_seq_len * float32_bytes > max_shared_mem:
-        raise RuntimeError(
-            f"vLLM cannot currently support max_model_len={max_seq_len} "
-            f"with block_size={block_size} on GPU with compute "
-            f"capability {torch.cuda.get_device_capability()} "
-            f"(required shared memory {required_shared_mem} > "
-            f"available shared memory {max_shared_mem}). "
-            "This will be fixed in a future release.")
-
-
 def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
     # Check if the GPU supports the dtype.
     if torch_dtype == torch.bfloat16:
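
A quick sanity check on the 8192 threshold (not part of the patch): the removed _check_if_can_support_max_seq_len estimated the V1 kernel's shared memory as roughly one float32 per padded context token plus a small buffer. The standalone Python sketch below redoes that arithmetic; the 48 KB per-block budget and the block_size of 16 are illustrative assumptions, not values taken from this patch.

    # Hypothetical sketch: mirrors the arithmetic of the removed
    # _check_if_can_support_max_seq_len (using integer division for clarity).
    def v1_shared_mem_bytes(max_seq_len: int, block_size: int = 16) -> int:
        float32_bytes = 4  # one float32 per (padded) context token
        padded_max_seq_len = ((max_seq_len + block_size - 1) // block_size) * block_size
        return (padded_max_seq_len + 512) * float32_bytes  # + extra buffer, as in the old check

    if __name__ == "__main__":
        budget = 48 * 1024  # assumed per-block shared memory limit (illustrative only)
        for seq_len in (4096, 8192, 16384, 32768):
            need = v1_shared_mem_bytes(seq_len)
            print(f"context {seq_len:>6}: ~{need:>7} B of shared memory, fits={need <= budget}")

Under those assumptions an 8K context still fits while 16K and beyond do not, which is why the new use_v1 heuristic simply falls back to the partitioned V2 kernel above 8192 tokens instead of raising an error at engine startup.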