diff --git a/keras/layers/preprocessing/text_vectorization.py b/keras/layers/preprocessing/text_vectorization.py
index 3e33386bcb8..d1c8cff8d81 100644
--- a/keras/layers/preprocessing/text_vectorization.py
+++ b/keras/layers/preprocessing/text_vectorization.py
@@ -389,7 +389,7 @@ def adapt(self, data, batch_size=None, steps=None):
     During `adapt()`, the layer will build a vocabulary of all string tokens
     seen in the dataset, sorted by occurance count, with ties broken by sort
     order of the tokens (high to low). At the end of `adapt()`, if `max_tokens`
-    is set, the voculary wil be truncated to `max_tokens` size. For example,
+    is set, the vocabulary wil be truncated to `max_tokens` size. For example,
     adapting a layer with `max_tokens=1000` will compute the 1000 most frequent
     tokens occurring in the input dataset. If `output_mode='tf-idf'`, `adapt()`
     will also learn the document frequencies of each token in the input dataset.
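
For context, a minimal sketch of the `adapt()`/`max_tokens` interaction that this docstring describes. The toy corpus and `max_tokens=4` are invented for illustration, and the expected vocabulary assumes the default `output_mode="int"`, which reserves slots for the padding (`""`) and OOV (`"[UNK]"`) tokens:

```python
import tensorflow as tf

# Toy corpus: "the" occurs 3 times, "sat" twice, every other token once.
data = tf.constant(["the cat sat on the mat", "the dog sat"])

# With output_mode="int" (the default), max_tokens counts the padding and
# OOV entries, so only the (max_tokens - 2) most frequent tokens are kept.
layer = tf.keras.layers.TextVectorization(max_tokens=4)
layer.adapt(data)

print(layer.get_vocabulary())
# Expected: ['', '[UNK]', 'the', 'sat'] -- the two most frequent tokens
# survive truncation; 'cat', 'on', 'mat', 'dog' map to the OOV index.
```

With `output_mode="tf-idf"`, the same `adapt()` call would additionally record per-token document frequencies, as noted in the last context line of the hunk.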