Skip to content

Commit

Permalink
Fix Qwen inference
Browse files (browse the repository at this point in the history)
  • Loading branch information
hiyouga committed Aug 3, 2023
1 parent ea30da4 commit 2780792
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 7 deletions.
4 changes: 1 addition & 3 deletions src/llmtuner/chat/stream_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,7 @@ def __init__(self, args: Optional[Dict[str, Any]] = None) -> None:
self.model = dispatch_model(self.model)
self.template = get_template(data_args.template)
self.source_prefix = data_args.source_prefix
self.stop_ids = [
self.tokenizer.encode(word, add_special_tokens=False)[0] for word in self.template.stop_words
]
self.stop_ids = self.tokenizer.convert_tokens_to_ids(self.template.stop_words)
self.tokenizer.add_special_tokens(dict(additional_special_tokens=self.template.stop_words))
self.model.generate = MethodType(PreTrainedModel.generate, self.model) # a monkey fix for qwen model

Expand Down
10 changes: 6 additions & 4 deletions src/llmtuner/tuner/core/loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,14 @@
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig
BitsAndBytesConfig,
PretrainedConfig,
PreTrainedModel,
PreTrainedTokenizerBase
)
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.modeling_utils import PretrainedConfig, PreTrainedModel
from transformers.tokenization_utils import PreTrainedTokenizerBase
from trl import AutoModelForCausalLMWithValueHead

from llmtuner.extras.logging import reset_logging, get_logger
Expand All @@ -22,6 +23,7 @@
from llmtuner.tuner.core.adapter import init_adapter

if TYPE_CHECKING:
from transformers import PreTrainedTokenizer
from llmtuner.hparams import ModelArguments


Expand All @@ -40,7 +42,7 @@ def load_model_and_tokenizer(
finetuning_args: "FinetuningArguments",
is_trainable: Optional[bool] = False,
stage: Optional[Literal["pt", "sft", "rm", "ppo"]] = "sft"
) -> Tuple[PreTrainedModel, PreTrainedTokenizerBase]:
) -> Tuple[PreTrainedModel, "PreTrainedTokenizer"]:
r"""
Loads pretrained model and tokenizer.
Expand Down

0 comments on commit 2780792

Please sign in to comment.