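# dpo_trainer.py: run Direct Preference Optimization (DPO) on top of a merged
# SFT checkpoint, using trl's DPOTrainer with 4-bit (QLoRA-style) quantization
# and LoRA adapters.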
import os
import torch
from datasets import load_dataset
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
from trl import DPOTrainer
from utils import find_all_linear_names, print_trainable_parameters
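# Where DPO checkpoints are written, and the merged SFT checkpoint used as the base model.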
output_dir = "./dpo_results"
model_name = "merged_peft/final_merged_checkpoint"
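# Preference data: each JSON record provides an "input" plus a "chosen" and a "rejected" response.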
dataset = load_dataset("json", data_files="dpo_conversations.json", split="train")
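# Load both the policy and the reference model in 4-bit NF4 to keep memory usage low.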
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
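# Policy model: the model whose LoRA adapters will be optimized by DPO.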
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, quantization_config=bnb_config
)
model.config.use_cache = False
model = prepare_model_for_kbit_training(model)
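# Frozen reference model: DPO compares the policy's log-probabilities against this model to keep the policy close to it.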
model_ref = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, quantization_config=bnb_config
)
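# Llama-style tokenizers ship without a pad token, so reuse EOS for padding.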
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
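# Wrap each raw input in the grammar-correction prompt and pass chosen/rejected through unchanged.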
def return_prompt_and_responses(samples):
    return {
        "prompt": [
            f"An AI tool that corrects and rephrase user text grammar errors delimited by triple backticks to standard English.\n### Input: ```{text}```\n ### Output: "
            for text in samples["input"]
        ],
        "chosen": samples["chosen"],
        "rejected": samples["rejected"],
    }
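# Rewrite the dataset into the prompt/chosen/rejected columns expected by DPOTrainer.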
original_columns = dataset.column_names
dataset = dataset.map(
    return_prompt_and_responses, batched=True, remove_columns=original_columns
)
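# Training hyperparameters: small per-device batches with gradient accumulation,
# gradient checkpointing, and a paged 32-bit AdamW optimizer to fit in GPU memory.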
training_args = TrainingArguments(
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    gradient_checkpointing=True,
    max_grad_norm=0.3,
    num_train_epochs=15,
    save_steps=100,
    learning_rate=2e-4,
    bf16=True,
    save_total_limit=3,
    logging_steps=10,
    output_dir=output_dir,
    optim="paged_adamw_32bit",
    lr_scheduler_type="cosine",
    warmup_ratio=0.05,
    remove_unused_columns=False,
)
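# Attach LoRA adapters to every linear layer found in the quantized model.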
peft_config = LoraConfig(
    r=32,
    lora_alpha=16,
    target_modules=find_all_linear_names(model),
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, peft_config)
print_trainable_parameters(model)
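# beta controls the strength of the implicit KL penalty that keeps the policy near the reference model.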
dpo_trainer = DPOTrainer(
    model,
    model_ref,
    args=training_args,
    beta=0.1,
    train_dataset=dataset,
    tokenizer=tokenizer,
    max_prompt_length=1024,
    max_length=2048,
)
dpo_trainer.train()
dpo_trainer.save_model(output_dir)
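# Also write the final adapter weights and tokenizer to a separate "final_checkpoint" directory.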
output_dir = os.path.join(output_dir, "final_checkpoint")
dpo_trainer.model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)