Advanced tokenization (#42)
* Add the OpenNMT tokenizer as a third party

* Add detokenization logic

* Add learn_bpe.py script

* Replace spaces in CharacterTokenizer

* Fix encoding issue when redirecting tokenization output

* Move stream tokenization to the library

* Build tokenizer plugin in Travis

* Add BLEU evaluator variant which applies a light tokenization

* Cleanup

* Add documentation

* Fix link

* Complete the README

* Fix tokenization configuration loading

* Add missing instruction
guillaumekln authored Jan 3, 2018
1 parent 009a592 commit 298f117
Showing 26 changed files with 760 additions and 51 deletions.
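The diffs below introduce a small tokenizer interface (`tokenize`, `detokenize`, and new stream helpers) used throughout the changed files. The outline that follows is a hedged reconstruction from this commit, not the actual class definition; exact signatures may differ.

```python
# Hedged sketch of the Tokenizer interface exercised in this commit,
# reconstructed from bin/*.py, opennmt/inputters/text_inputter.py and the tests.
class Tokenizer(object):

  def tokenize(self, text):
    """Returns the tokens of a Python string or 0-D string tensor."""

  def detokenize(self, tokens, sequence_length=None):
    """Merges tokens (optionally a padded batch) back into text."""

  def tokenize_stream(self, delimiter=" "):
    """Tokenizes stdin line by line and writes delimiter-joined tokens to stdout."""

  def detokenize_stream(self, delimiter=" "):
    """Reads delimiter-separated tokens from stdin and writes detokenized text."""
```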
3 changes: 3 additions & 0 deletions .gitmodules
@@ -0,0 +1,3 @@
[submodule "third_party/OpenNMTTokenizer"]
path = third_party/OpenNMTTokenizer
url = https://github.com/OpenNMT/Tokenizer.git
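For reference, a `.gitmodules` entry like this is what `git submodule add` produces; a sketch of the command that would have created it (not part of this diff):

```bash
git submodule add https://github.com/OpenNMT/Tokenizer.git third_party/OpenNMTTokenizer
```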
20 changes: 19 additions & 1 deletion .travis.yml
@@ -2,7 +2,18 @@ language: python
python:
- "2.7"
- "3.5"
install:
addons:
apt:
sources:
- george-edison55-precise-backports
- ubuntu-toolchain-r-test
packages:
- gcc-4.8
- g++-4.8
- cmake
- cmake-data
- libboost-python-dev
before_install:
- pip install tensorflow==1.4.0
- pip install pyyaml
- pip install nose2
@@ -14,6 +25,13 @@ install:
pip install sphinx_rtd_theme
pip install recommonmark
fi
install:
- export CXX="g++-4.8" CC="gcc-4.8"
- mkdir build && cd build
- cmake ..
- make
- export PYTHONPATH="$PYTHONPATH:$PWD/third_party/OpenNMTTokenizer/bindings/python/"
- cd ..
script:
- nose2
- if [ "$TRAVIS_PYTHON_VERSION" == "3.5" ]; then pylint opennmt/ bin/; fi
8 changes: 8 additions & 0 deletions CMakeLists.txt
@@ -0,0 +1,8 @@
cmake_minimum_required(VERSION 3.1)

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Release)
set(LIB_ONLY ON)
set(WITH_PYTHON_BINDINGS ON)

add_subdirectory(third_party/OpenNMTTokenizer)
3 changes: 3 additions & 0 deletions README.md
@@ -18,6 +18,7 @@ OpenNMT-tf focuses on modularity and extensibility using standard TensorFlow mod
* **hybrid encoder-decoder models**<br/>e.g. self-attention encoder and RNN decoder or vice versa.
* **multi-source training**<br/>e.g. source text and Moses translation as inputs for machine translation.
* **multiple input formats**<br/>text with support for mixed word/character embeddings or real vectors serialized in *TFRecord* files.
* **on-the-fly tokenization**<br/>apply advanced tokenization dynamically during training and detokenize the predictions during inference or evaluation.

and all of the above can be used simultaneously to train novel and complex architectures. See the [predefined models](config/models) to discover how they are defined.

@@ -76,6 +77,8 @@ python -m bin.main infer --config config/opennmt-defaults.yml config/data/toy-en

**Note:** do not expect any good translation results with this toy example. Consider training on [larger parallel datasets](http://www.statmt.org/wmt16/translation-task.html) instead.

*For more advanced usage, see the [documentation](http://opennmt.net/OpenNMT-tf).*

## Compatibility with {Lua,Py}Torch implementations

OpenNMT-tf has been designed from scratch and compatibility with the {Lua,Py}Torch implementations in terms of usage, design, and features is not a priority. Please submit a feature request for any missing feature or behavior that you found useful in the {Lua,Py}Torch implementations.
8 changes: 2 additions & 6 deletions bin/build_vocab.py
@@ -6,8 +6,6 @@
from opennmt import tokenizers
from opennmt import utils

from opennmt.utils.misc import get_classnames_in_module


def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -17,9 +15,6 @@ def main():
parser.add_argument(
"--save_vocab", required=True,
help="Output vocabulary file.")
parser.add_argument(
"--tokenizer", default="SpaceTokenizer", choices=get_classnames_in_module(tokenizers),
help="Tokenizer class name.")
parser.add_argument(
"--min_frequency", type=int, default=1,
help="Minimum word frequency.")
@@ -29,9 +24,10 @@
parser.add_argument(
"--without_sequence_tokens", default=False, action="store_true",
help="If set, do not add special sequence tokens (start, end) in the vocabulary.")
tokenizers.add_command_line_arguments(parser)
args = parser.parse_args()

tokenizer = getattr(tokenizers, args.tokenizer)()
tokenizer = tokenizers.build_tokenizer(args)

special_tokens = [constants.PADDING_TOKEN]
if not args.without_sequence_tokens:
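`build_vocab.py` now delegates option parsing and tokenizer construction to helpers in `opennmt.tokenizers`. A hedged sketch of what `add_command_line_arguments` and `build_tokenizer` plausibly look like; the real implementations may differ, and the `--tokenizer_config` flag is inferred from the documentation further down.

```python
# Hedged sketch, not the actual implementation: a command-line driven
# tokenizer factory consistent with how the scripts in this diff use it.
import opennmt.tokenizers as tokenizers


def add_command_line_arguments(parser):
  """Registers the tokenization options shared by the CLI scripts."""
  parser.add_argument(
      "--tokenizer", default="SpaceTokenizer",
      help="Tokenizer class name.")
  parser.add_argument(
      "--tokenizer_config", default=None,
      help="Path to a YAML tokenization configuration (used by OpenNMTTokenizer).")


def build_tokenizer(args):
  """Instantiates the tokenizer selected on the command line."""
  tokenizer_class = getattr(tokenizers, args.tokenizer)
  if args.tokenizer_config:
    return tokenizer_class(configuration_file_or_key=args.tokenizer_config)
  return tokenizer_class()
```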
22 changes: 22 additions & 0 deletions bin/detokenize_text.py
@@ -0,0 +1,22 @@
"""Standalone script to detokenize a corpus."""

from __future__ import print_function

import argparse

from opennmt import tokenizers


def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--delimiter", default=" ",
help="Token delimiter used in text serialization.")
tokenizers.add_command_line_arguments(parser)
args = parser.parse_args()

tokenizer = tokenizers.build_tokenizer(args)
tokenizer.detokenize_stream(delimiter=args.delimiter)

if __name__ == "__main__":
main()
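A possible invocation of this new script, mirroring the `tokenize_text` example in the documentation below; the input and output file names are illustrative only.

```bash
python -m bin.detokenize_text --tokenizer OpenNMTTokenizer --tokenizer_config config/tokenization/aggressive.yml < predictions.tok > predictions.txt
```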
15 changes: 3 additions & 12 deletions bin/tokenize_text.py
@@ -3,29 +3,20 @@
from __future__ import print_function

import argparse
import sys

from opennmt import tokenizers
from opennmt.utils.misc import get_classnames_in_module


def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--tokenizer", default="SpaceTokenizer", choices=get_classnames_in_module(tokenizers),
help="Tokenizer class name.")
parser.add_argument(
"--delimiter", default=" ",
help="Token delimiter for text serialization.")
tokenizers.add_command_line_arguments(parser)
args = parser.parse_args()

tokenizer = getattr(tokenizers, args.tokenizer)()

for line in sys.stdin:
line = line.strip()
tokens = tokenizer(line)
merged_tokens = args.delimiter.join(tokens)
print(merged_tokens)
tokenizer = tokenizers.build_tokenizer(args)
tokenizer.tokenize_stream(delimiter=args.delimiter)

if __name__ == "__main__":
main()
2 changes: 1 addition & 1 deletion config/sample.yml
@@ -77,7 +77,7 @@ train:
# (optional) Save evaluation predictions in model_dir/eval/.
save_eval_predictions: false
# (optional) Evaluator or list of evaluators that are called on the saved evaluation predictions.
# Available evaluators: BLEU
# Available evaluators: BLEU, BLEU-detok
external_evaluators: BLEU
# (optional) The maximum length of feature sequences during training (default: None).
maximum_features_length: 70
2 changes: 2 additions & 0 deletions config/tokenization/aggressive.yml
@@ -0,0 +1,2 @@
mode: aggressive
joiner_annotate: true
12 changes: 12 additions & 0 deletions config/tokenization/sample.yml
@@ -0,0 +1,12 @@
# This is a sample tokenization configuration with all values set to their default.

mode: conservative
bpe_model_path: ""
joiner:
joiner_annotate: false
joiner_new: false
case_feature: false
segment_case: false
segment_numbers: false
segment_alphabet_change: false
segment_alphabet: []
2 changes: 1 addition & 1 deletion docs/data.md
@@ -9,7 +9,7 @@ The format of the data files is defined by the `opennmt.inputters.Inputter` used
All `opennmt.inputters.TextInputter`s expect a text file as input where:

* sentences are separated by a **newline**
* tokens are separated by a **space** (unless a custom tokenizer is set)
* tokens are separated by a **space** (unless a custom tokenizer is set, see [Tokenization](tokenization.html))

For example:

1 change: 1 addition & 0 deletions docs/index.rst
@@ -7,6 +7,7 @@ Overview
:maxdepth: 1

data.md
tokenization.md
configuration.md
training.md
serving.md
7 changes: 7 additions & 0 deletions docs/package/opennmt.tokenizers.opennmt_tokenizer.rst
@@ -0,0 +1,7 @@
opennmt\.tokenizers\.opennmt\_tokenizer module
==============================================

.. automodule:: opennmt.tokenizers.opennmt_tokenizer
:members:
:undoc-members:
:show-inheritance:
1 change: 1 addition & 0 deletions docs/package/opennmt.tokenizers.rst
@@ -11,5 +11,6 @@ Submodules

.. toctree::

opennmt.tokenizers.opennmt_tokenizer
opennmt.tokenizers.tokenizer

89 changes: 89 additions & 0 deletions docs/tokenization.md
@@ -0,0 +1,89 @@
# Tokenization

OpenNMT-tf can use the OpenNMT [Tokenizer](https://github.com/OpenNMT/Tokenizer) as a plugin to provide advanced tokenization behaviors.

## Installation

The following tools and packages are required:

* C++11 compiler
* CMake
* Boost.Python

On Ubuntu, these packages can be installed with `apt-get`:

```bash
sudo apt-get install build-essential gcc cmake libboost-python-dev
```

1\. Fetch the Tokenizer plugin inside the OpenNMT-tf repository:

```bash
git submodule update --init
```

2\. Compile the tokenizer plugin:

```bash
mkdir build && cd build
cmake .. && make
cd ..
```

3\. Configure your environment for Python to find the newly generated package:

```bash
export PYTHONPATH="$PYTHONPATH:$HOME/OpenNMT-tf/build/third_party/OpenNMTTokenizer/bindings/python/"
```

4\. Test the plugin:

```bash
$ echo "Hello world!" | python -m bin.tokenize_text --tokenizer OpenNMTTokenizer
Hello world !
```

## Usage

YAML files are used to set the tokenizer options to ensure consistency during data preparation and training. See the sample file `config/tokenization/sample.yml`.

Here is an example workflow:

1\. Build the vocabularies with the custom tokenizer, e.g.:

```bash
python -m bin.build_vocab --tokenizer OpenNMTTokenizer --tokenizer_config config/tokenization/aggressive.yml --size 50000 --save_vocab data/enfr/en-vocab.txt data/enfr/en-train.txt
python -m bin.build_vocab --tokenizer OpenNMTTokenizer --tokenizer_config config/tokenization/aggressive.yml --size 50000 --save_vocab data/enfr/fr-vocab.txt data/enfr/fr-train.txt
```

*The text files are only given as examples and are not part of the repository.*

2\. Update your model's `TextInputter`s to use the custom tokenizer, e.g.:

```python
return onmt.models.SequenceToSequence(
source_inputter=onmt.inputters.WordEmbedder(
vocabulary_file_key="source_words_vocabulary",
embedding_size=512,
tokenizer=onmt.tokenizers.OpenNMTTokenizer(
configuration_file_or_key="source_tokenizer_config")),
target_inputter=onmt.inputters.WordEmbedder(
vocabulary_file_key="target_words_vocabulary",
embedding_size=512,
tokenizer=onmt.tokenizers.OpenNMTTokenizer(
configuration_file_or_key="target_tokenizer_config")),
...)
```

3\. Reference the tokenizer configurations in the data configuration, e.g.:

```yaml
data:
source_tokenizer_config: config/tokenization/aggressive.yml
target_tokenizer_config: config/tokenization/aggressive.yml
```

## Notes

* As of now, tokenizers are not part of the exported graph.
* Predictions saved during inference or evaluation are detokenized. Consider using the "BLEU-detok" external evaluator, which applies a simple word-level tokenization before computing the BLEU score.
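The evaluator is selected in the run configuration; a minimal sketch following the layout of `config/sample.yml` shown earlier in this diff (whether `save_eval_predictions` must also be enabled is an assumption here):

```yaml
train:
  save_eval_predictions: true
  external_evaluators: BLEU-detok
```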
2 changes: 1 addition & 1 deletion opennmt/inputters/text_inputter.py
@@ -224,7 +224,7 @@ def _process(self, data):

if "tokens" not in data:
text = data["raw"]
tokens = self.tokenizer(text)
tokens = self.tokenizer.tokenize(text)
length = tf.shape(tokens)[0]

data = self.set_data_field(data, "tokens", tokens, padded_shape=[None], volatile=True)
4 changes: 2 additions & 2 deletions opennmt/models/sequence_to_sequence.py
@@ -231,5 +231,5 @@ def print_prediction(self, prediction, params=None, stream=None):

for i in range(n_best):
tokens = prediction["tokens"][i][:prediction["length"][i] - 1] # Ignore </s>.
sentence = b" ".join(tokens)
print_bytes(sentence, stream=stream)
sentence = self.target_inputter.tokenizer.detokenize(tokens)
print_bytes(tf.compat.as_bytes(sentence), stream=stream)
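`print_prediction` now relies on the tokenizer's `detokenize` method instead of joining tokens with spaces. A minimal round-trip illustration with the default `SpaceTokenizer`, consistent with the tests below (assuming the package-level import):

```python
from opennmt.tokenizers import SpaceTokenizer

tokenizer = SpaceTokenizer()
tokens = tokenizer.tokenize("Hello world !")  # ["Hello", "world", "!"]
text = tokenizer.detokenize(tokens)           # "Hello world !"
```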
43 changes: 40 additions & 3 deletions opennmt/tests/tokenizer_test.py
@@ -10,25 +10,62 @@ class TokenizerTest(tf.test.TestCase):
def _testTokenizerOnTensor(self, tokenizer, text, ref_tokens):
ref_tokens = [tf.compat.as_bytes(token) for token in ref_tokens]
text = tf.constant(text)
tokens = tokenizer(text)
tokens = tokenizer.tokenize(text)
with self.test_session() as sess:
tokens = sess.run(tokens)
self.assertAllEqual(ref_tokens, tokens)

def _testTokenizerOnString(self, tokenizer, text, ref_tokens):
ref_tokens = [tf.compat.as_text(token) for token in ref_tokens]
tokens = tokenizer(text)
tokens = tokenizer.tokenize(text)
self.assertAllEqual(ref_tokens, tokens)

def _testTokenizer(self, tokenizer, text, ref_tokens):
self._testTokenizerOnTensor(tokenizer, text, ref_tokens)
self._testTokenizerOnString(tokenizer, text, ref_tokens)

def _testDetokenizerOnTensor(self, tokenizer, tokens, ref_text):
ref_text = tf.compat.as_bytes(ref_text)
tokens = tf.constant(tokens)
text = tokenizer.detokenize(tokens)
with self.test_session() as sess:
text = sess.run(text)
self.assertEqual(ref_text, text)

def _testDetokenizerOnBatchTensor(self, tokenizer, tokens, ref_text):
ref_text = [tf.compat.as_bytes(t) for t in ref_text]
sequence_length = [len(x) for x in tokens]
max_length = max(sequence_length)
tokens = [tok + [""] * (max_length - len(tok)) for tok in tokens]
tokens = tf.constant(tokens)
sequence_length = tf.constant(sequence_length)
text = tokenizer.detokenize(tokens, sequence_length=sequence_length)
with self.test_session() as sess:
text = sess.run(text)
self.assertAllEqual(ref_text, text)

def _testDetokenizerOnString(self, tokenizer, tokens, ref_text):
tokens = [tf.compat.as_text(token) for token in tokens]
ref_text = tf.compat.as_text(ref_text)
text = tokenizer.detokenize(tokens)
self.assertAllEqual(ref_text, text)

def _testDetokenizer(self, tokenizer, tokens, ref_text):
self._testDetokenizerOnBatchTensor(tokenizer, tokens, ref_text)
for tok, ref in zip(tokens, ref_text):
self._testDetokenizerOnTensor(tokenizer, tok, ref)
self._testDetokenizerOnString(tokenizer, tok, ref)

def testSpaceTokenizer(self):
self._testTokenizer(SpaceTokenizer(), "Hello world !", ["Hello", "world", "!"])
self._testDetokenizer(
SpaceTokenizer(),
[["Hello", "world", "!"], ["Test"], ["My", "name"]],
["Hello world !", "Test", "My name"])

def testCharacterTokenizer(self):
self._testTokenizer(CharacterTokenizer(), "a b", ["a", " ", "b"])
self._testTokenizer(CharacterTokenizer(), "a b", ["a", "▁", "b"])
self._testDetokenizer(CharacterTokenizer(), [["a", "▁", "b"]], ["a b"])
self._testTokenizer(CharacterTokenizer(), "你好,世界!", ["你", "好", ",", "世", "界", "!"])

