From bb1af9c22e4837816b7e5ee2669a496b992cd6a2 Mon Sep 17 00:00:00 2001
From: Kunshang Ji
Date: Mon, 25 Mar 2024 10:01:13 +0800
Subject: [PATCH] [BugFix] tensor.get_device() -> tensor.device (#3604)

---
 vllm/model_executor/layers/rotary_embedding.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py
index 71af9b26e2e93..d80e73bbe39e9 100644
--- a/vllm/model_executor/layers/rotary_embedding.py
+++ b/vllm/model_executor/layers/rotary_embedding.py
@@ -108,7 +108,7 @@ def _forward(
         query_pass = query[..., self.rotary_dim:]
         key_pass = key[..., self.rotary_dim:]
 
-        self.cos_sin_cache = self.cos_sin_cache.to(positions.get_device())
+        self.cos_sin_cache = self.cos_sin_cache.to(positions.device)
         cos_sin = self.cos_sin_cache[torch.add(positions, offsets)
                                      if offsets is not None else positions]
         cos, sin = cos_sin.chunk(2, dim=-1)
@@ -142,7 +142,7 @@ def forward(
         key: torch.Tensor,
         offsets: Optional[torch.Tensor] = None,
     ) -> Tuple[torch.Tensor, torch.Tensor]:
-        self.cos_sin_cache = self.cos_sin_cache.to(positions.get_device())
+        self.cos_sin_cache = self.cos_sin_cache.to(positions.device)
         # ops.rotary_embedding()/batched_rotary_embedding()
         # are in-place operations that update the query and key tensors.
         if offsets is not None:
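
For context, a minimal sketch of why Tensor.device is the portable choice here,
assuming PyTorch 2.x semantics (the CPU behavior of get_device() has varied
across releases): get_device() returns a bare integer ordinal, -1 for CPU
tensors, which carries no backend type and is not a usable target for
Tensor.to(), whereas Tensor.device returns a torch.device object that is valid
on any backend:

    import torch

    positions = torch.zeros(4, dtype=torch.long)  # CPU tensor, as in a CPU-only run

    # Tensor.device is a torch.device that identifies both the backend type
    # and the index, so it is a valid target for Tensor.to() on any device:
    print(positions.device)        # device(type='cpu')

    # Tensor.get_device() returns only an integer ordinal; for CPU tensors it
    # is -1 (older PyTorch releases raised an error instead), which cannot be
    # passed to Tensor.to():
    print(positions.get_device())  # -1

    # The pattern from the patch, written portably:
    cos_sin_cache = torch.zeros(4, 2)
    cos_sin_cache = cos_sin_cache.to(positions.device)  # CPU, CUDA, XPU, ...

Because a torch.device preserves the backend type while a bare ordinal does
not, the .device form also keeps this code usable on non-CUDA accelerators.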