Skip to content

Commit

Permalink
Fix for large v3 faster whisper model (#815)
Browse files Browse the repository at this point in the history
  • Loading branch information
raivisdejus authored Jun 26, 2024
1 parent 3d8f5da commit a98ec9f
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 1 deletion.
1 change: 1 addition & 0 deletions buzz/model_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -334,6 +334,7 @@ def download_faster_whisper_model(
"config.json",
"tokenizer.json",
"vocabulary.txt",
"vocabulary.json",
]

if local_files_only:
Expand Down
8 changes: 7 additions & 1 deletion buzz/transcriber/recording_transcriber.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from PyQt6.QtCore import QObject, pyqtSignal

from buzz import transformers_whisper, whisper_audio
from buzz.model_loader import ModelType, get_custom_api_whisper_model
from buzz.model_loader import WhisperModelSize, ModelType, get_custom_api_whisper_model
from buzz.settings.settings import Settings
from buzz.transcriber.transcriber import TranscriptionOptions, Task
from buzz.transcriber.whisper_cpp import WhisperCpp, whisper_cpp_params
Expand Down Expand Up @@ -68,6 +68,12 @@ def start(self):
model = WhisperCpp(model_path)
elif self.transcription_options.model.model_type == ModelType.FASTER_WHISPER:
model = faster_whisper.WhisperModel(model_path)

                # Fix for large-v3 https://github.com/guillaumekln/faster-whisper/issues/547#issuecomment-1797962599
if self.transcription_options.model.whisper_model_size == WhisperModelSize.LARGEV3:
model.feature_extractor.mel_filters = model.feature_extractor.get_mel_filters(
model.feature_extractor.sampling_rate, model.feature_extractor.n_fft, n_mels=128
)
elif self.transcription_options.model.model_type == ModelType.OPEN_AI_WHISPER_API:
settings = Settings()
custom_openai_base_url = settings.value(
Expand Down

0 comments on commit a98ec9f

Please sign in to comment.