Fix bias in InternLM (vllm-project#1501)
WoosukKwon authored Oct 29, 2023
1 parent 3747bd4 commit 5651091
Showing 1 changed file with 4 additions and 2 deletions.
vllm/model_executor/models/internlm.py: 4 additions, 2 deletions
@@ -62,6 +62,7 @@ def __init__(
         self,
         hidden_size: int,
         num_heads: int,
+        bias: bool,
         rope_theta: float = 10000,
         max_position_embeddings: int = 8192,
     ):
@@ -81,13 +82,13 @@ def __init__(
         self.qkv_proj = ColumnParallelLinear(
             hidden_size,
             3 * self.total_num_heads * self.head_dim,
-            bias=True,
+            bias=bias,
             gather_output=False,
         )
         self.o_proj = RowParallelLinear(
             self.total_num_heads * self.head_dim,
             hidden_size,
-            bias=True,
+            bias=bias,
             input_is_parallel=True,
         )
         self.attn = PagedAttentionWithRoPE(
@@ -126,6 +127,7 @@ def __init__(self, config: LlamaConfig):
         self.self_attn = InternLMAttention(
             hidden_size=self.hidden_size,
             num_heads=config.num_attention_heads,
+            bias=config.bias,
             rope_theta=rope_theta,
             max_position_embeddings=max_position_embeddings,
         )
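For context, a minimal sketch of what this change does, assuming plain torch.nn.Linear as a stand-in for vLLM's ColumnParallelLinear and RowParallelLinear (SimpleInternLMAttention is an illustrative name, not code from the repository): the bias flag is now read from the model config and threaded into the attention projections instead of being hard-coded to True.

import torch.nn as nn

class SimpleInternLMAttention(nn.Module):
    # Hypothetical stand-in for InternLMAttention; nn.Linear replaces the
    # tensor-parallel linear layers used in vLLM.
    def __init__(self, hidden_size: int, num_heads: int, bias: bool):
        super().__init__()
        self.head_dim = hidden_size // num_heads
        # Before this commit, bias was hard-coded to True in both projections;
        # it now follows the `bias` argument threaded in from the config.
        self.qkv_proj = nn.Linear(hidden_size,
                                  3 * num_heads * self.head_dim,
                                  bias=bias)
        self.o_proj = nn.Linear(num_heads * self.head_dim,
                                hidden_size,
                                bias=bias)

# The decoder layer would then construct the attention module from the
# config rather than assuming a bias, e.g.:
# attn = SimpleInternLMAttention(config.hidden_size,
#                                config.num_attention_heads,
#                                bias=config.bias)

This matters because some InternLM checkpoints are trained without bias terms in the attention projections, so always instantiating biased layers would mismatch those weights.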
