From 266a7269f94c462254aff99b8ab0f3ec9c031893 Mon Sep 17 00:00:00 2001
From: Kevin Kibe
Date: Tue, 24 Sep 2024 20:31:49 +0300
Subject: [PATCH] update(DOCS): Trainer module docs (#198)

---
 DOCS/gettingstarted.md | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/DOCS/gettingstarted.md b/DOCS/gettingstarted.md
index 87d1f5f..dd40764 100644
--- a/DOCS/gettingstarted.md
+++ b/DOCS/gettingstarted.md
@@ -81,23 +81,30 @@ from training.model_trainer import Trainer
 
 # Initialize the Trainer class and train the model
 trainer = Trainer(
-    huggingface_token,
-    model_id,
-    processed_dataset,
-    model,
-    feature_processor,
-    feature_extractor,
-    tokenizer,
-    wandb_api_key,
-    use_peft,
-    processing_task
+    huggingface_token=huggingface_token,
+    model_id=model_id,
+    dataset=processed_dataset,
+    model=model,
+    feature_processor=feature_processor,
+    feature_extractor=feature_extractor,
+    tokenizer=tokenizer,
+    wandb_api_key=wandb_api_key,
+    use_peft=use_peft,
+    processing_task=processing_task,
+    language=language_abbr
 )
 trainer.train(
-    max_steps=100,
-    learning_rate=1e-3,
-    per_device_train_batch_size=8,  # Adjust based on available RAM; increase if more RAM is available
-    per_device_eval_batch_size=8,  # Adjust based on available RAM; increase if more RAM is available
-    optim="adamw_bnb_8bit"
+    warmup_steps=10,
+    max_steps=500,
+    learning_rate=0.0001,
+    lr_scheduler_type="constant_with_warmup",
+    per_device_train_batch_size=32,  # Adjust based on available RAM; increase if more RAM is available
+    per_device_eval_batch_size=32,  # Adjust based on available RAM; increase if more RAM is available
+    optim="adamw_bnb_8bit",
+    save_steps=100,
+    logging_steps=100,
+    eval_steps=100,
+    gradient_checkpointing=True,
 )
 
 # Optional parameters for training:
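
For reference, the example in DOCS/gettingstarted.md reads as follows once this patch is applied. This is a minimal sketch: the variables (huggingface_token, model_id, processed_dataset, model, feature_processor, feature_extractor, tokenizer, wandb_api_key, use_peft, processing_task, language_abbr) are assumed to have been created in the earlier steps of the guide, and the inline comments are interpretive descriptions of the parameters rather than text taken from the patch.

    from training.model_trainer import Trainer

    # Initialize the Trainer; every argument below is produced by the
    # earlier data-preparation and model-loading steps of the guide.
    trainer = Trainer(
        huggingface_token=huggingface_token,   # Hugging Face access token
        model_id=model_id,                     # checkpoint being fine-tuned
        dataset=processed_dataset,             # output of the preprocessing step
        model=model,
        feature_processor=feature_processor,
        feature_extractor=feature_extractor,
        tokenizer=tokenizer,
        wandb_api_key=wandb_api_key,           # Weights & Biases key for experiment tracking
        use_peft=use_peft,                     # enable parameter-efficient fine-tuning
        processing_task=processing_task,
        language=language_abbr,                # language abbreviation of the dataset
    )

    # Train for 500 steps with a constant-with-warmup schedule; checkpoints,
    # logs, and evaluation runs are produced every 100 steps. Batch sizes can
    # be raised or lowered to fit the available RAM.
    trainer.train(
        warmup_steps=10,
        max_steps=500,
        learning_rate=0.0001,
        lr_scheduler_type="constant_with_warmup",
        per_device_train_batch_size=32,
        per_device_eval_batch_size=32,
        optim="adamw_bnb_8bit",
        save_steps=100,
        logging_steps=100,
        eval_steps=100,
        gradient_checkpointing=True,
    )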