From f0bacbe1d7213cd436ddf3167e01432c470b7d44 Mon Sep 17 00:00:00 2001
From: Kunshang Ji
Date: Mon, 25 Mar 2024 01:20:09 +0000
Subject: [PATCH] minor fix: get_device -> device

---
 vllm/model_executor/layers/rotary_embedding.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py
index 71af9b26e2e93..d80e73bbe39e9 100644
--- a/vllm/model_executor/layers/rotary_embedding.py
+++ b/vllm/model_executor/layers/rotary_embedding.py
@@ -108,7 +108,7 @@ def _forward(
             query_pass = query[..., self.rotary_dim:]
             key_pass = key[..., self.rotary_dim:]
 
-        self.cos_sin_cache = self.cos_sin_cache.to(positions.get_device())
+        self.cos_sin_cache = self.cos_sin_cache.to(positions.device)
         cos_sin = self.cos_sin_cache[torch.add(positions, offsets)
                                      if offsets is not None else positions]
         cos, sin = cos_sin.chunk(2, dim=-1)
@@ -142,7 +142,7 @@ def forward(
         key: torch.Tensor,
         offsets: Optional[torch.Tensor] = None,
     ) -> Tuple[torch.Tensor, torch.Tensor]:
-        self.cos_sin_cache = self.cos_sin_cache.to(positions.get_device())
+        self.cos_sin_cache = self.cos_sin_cache.to(positions.device)
         # ops.rotary_embedding()/batched_rotary_embedding()
         # are in-place operations that update the query and key tensors.
         if offsets is not None:
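
A minimal sketch of why Tensor.device is preferred over Tensor.get_device()
in this pattern. get_device() returns a bare integer ordinal, which drops
the device type (and is -1 for CPU tensors), so Tensor.to() cannot reliably
place the cache on CPU or on non-CUDA backends such as XPU. Tensor.device
returns a full torch.device carrying both type and index. Variable names
below are illustrative, not taken from the committed code:

    import torch

    positions = torch.arange(8)          # a CPU tensor in this sketch
    cos_sin_cache = torch.randn(32, 16)  # stand-in for the rotary cache

    # Old pattern: positions.get_device() yields -1 on CPU (not a valid
    # device index), and on other backends a bare ordinal that .to()
    # interprets as a CUDA device, so the move fails or lands on the
    # wrong device type.
    # cos_sin_cache = cos_sin_cache.to(positions.get_device())

    # Fixed pattern: positions.device is a torch.device such as "cpu",
    # "cuda:0", or "xpu:0", so the cache follows positions on any backend.
    cos_sin_cache = cos_sin_cache.to(positions.device)
    assert cos_sin_cache.device == positions.device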