updated jenkins tests
Signed-off-by: ericharper <[email protected]>
ericharper committed Jul 22, 2020
1 parent 5d0f319 commit db1eaeb
Showing 1 changed file with 22 additions and 22 deletions.
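
This change moves the Hydra overrides for the PyTorch Lightning Trainer in the NeMo example scripts from the pl.trainer.* namespace to the top-level trainer.* namespace; the + prefix (as in +trainer.fast_dev_run=true) appends a key that is not present in the base config. As a minimal sketch of the updated invocation, reusing the speech-to-text stage and its test manifests from the diff below (run outside Jenkins; paths assume the CI test-data mount):

python examples/asr/speech_to_text.py \
    model.train_ds.manifest_filepath=/home/TestData/an4_dataset/an4_train.json \
    model.validation_ds.manifest_filepath=/home/TestData/an4_dataset/an4_val.json \
    trainer.gpus=1 \
    +trainer.fast_dev_run=True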
44 changes: 22 additions & 22 deletions Jenkinsfile
@@ -91,13 +91,13 @@ pipeline {

stage('L2: Speech 2 Text dev run') {
steps {
- sh 'python examples/asr/speech_to_text.py model.train_ds.manifest_filepath=/home/TestData/an4_dataset/an4_train.json model.validation_ds.manifest_filepath=/home/TestData/an4_dataset/an4_val.json pl.trainer.gpus=1 +pl.trainer.fast_dev_run=True'
+ sh 'python examples/asr/speech_to_text.py model.train_ds.manifest_filepath=/home/TestData/an4_dataset/an4_train.json model.validation_ds.manifest_filepath=/home/TestData/an4_dataset/an4_val.json trainer.gpus=1 +trainer.fast_dev_run=True'
}
}

stage('L2: Speaker Recognition dev run') {
steps {
- sh 'python examples/speaker_recognition/speaker_reco.py model.train_ds.batch_size=10 model.validation_ds.batch_size=2 model.train_ds.manifest_filepath=/home/TestData/an4_speaker/train.json model.validation_ds.manifest_filepath=/home/TestData/an4_speaker/dev.json pl.trainer.gpus=[1] +pl.trainer.fast_dev_run=True'
+ sh 'python examples/speaker_recognition/speaker_reco.py model.train_ds.batch_size=10 model.validation_ds.batch_size=2 model.train_ds.manifest_filepath=/home/TestData/an4_speaker/train.json model.validation_ds.manifest_filepath=/home/TestData/an4_speaker/dev.json trainer.gpus=[1] +trainer.fast_dev_run=True'
}
}

@@ -118,10 +118,10 @@ pipeline {
model.validation_ds.file=/home/TestData/nlp/squad_mini/v1.1/dev-v1.1.json \
model.language_model.pretrained_model_name=bert-base-uncased \
model.version_2_with_negative=false \
- pl.trainer.precision=16 \
- pl.trainer.amp_level=O1 \
- pl.trainer.gpus=[0] \
- +pl.trainer.fast_dev_run=true \
+ trainer.precision=16 \
+ trainer.amp_level=O1 \
+ trainer.gpus=[0] \
+ +trainer.fast_dev_run=true \
'
sh 'rm -rf examples/nlp/question_answering/NeMo_experiments && \
rm -rf /home/TestData/nlp/squad_mini/v1.1/*cache*'
@@ -135,10 +135,10 @@ pipeline {
model.validation_ds.file=/home/TestData/nlp/squad_mini/v2.0/dev-v2.0.json \
model.language_model.pretrained_model_name=bert-base-uncased \
model.version_2_with_negative=true \
- pl.trainer.precision=16 \
- pl.trainer.amp_level=O1 \
- pl.trainer.gpus=[1] \
- +pl.trainer.fast_dev_run=true \
+ trainer.precision=16 \
+ trainer.amp_level=O1 \
+ trainer.gpus=[1] \
+ +trainer.fast_dev_run=true \
'
sh 'rm -rf examples/nlp/question_answering/NeMo_experiments && \
rm -rf /home/TestData/nlp/squad_mini/v2.0/*cache*'
@@ -165,10 +165,10 @@ pipeline {
model.language_model.do_lower_case=true \
model.language_model.pretrained_model_name=roberta-base \
model.version_2_with_negative=false \
- pl.trainer.precision=16 \
- pl.trainer.amp_level=O1 \
- pl.trainer.gpus=[0] \
- +pl.trainer.fast_dev_run=true \
+ trainer.precision=16 \
+ trainer.amp_level=O1 \
+ trainer.gpus=[0] \
+ +trainer.fast_dev_run=true \
'
sh 'rm -rf examples/nlp/question_answering/NeMo_experiments && \
rm -rf /home/TestData/nlp/squad_mini/v1.1/*cache*'
@@ -183,10 +183,10 @@ pipeline {
model.language_model.do_lower_case=true \
model.language_model.pretrained_model_name=roberta-base \
model.version_2_with_negative=true \
- pl.trainer.precision=16 \
- pl.trainer.amp_level=O1 \
- pl.trainer.gpus=[1] \
- +pl.trainer.fast_dev_run=true \
+ trainer.precision=16 \
+ trainer.amp_level=O1 \
+ trainer.gpus=[1] \
+ +trainer.fast_dev_run=true \
'
sh 'rm -rf examples/nlp/question_answering/NeMo_experiments && \
rm -rf /home/TestData/nlp/squad_mini/v2.0/*cache*'
@@ -200,7 +200,7 @@ pipeline {
parallel {
stage ('Text Classification with BERT Test') {
steps {
- sh 'cd examples/nlp/text_classification && python text_classification_with_bert.py pl.trainer.gpus=1 model.language_model.pretrained_model_name=bert-base-uncased pl.trainer.max_epochs=1 model.language_model.max_seq_length=50 model.data_dir=/home/TestData/nlp/retail/ model.validation_ds.prefix=dev model.train_ds.batch_size=10 model.train_ds.num_samples=-1 model.language_model.do_lower_case=true'
+ sh 'cd examples/nlp/text_classification && python text_classification_with_bert.py trainer.gpus=1 model.language_model.pretrained_model_name=bert-base-uncased trainer.max_epochs=1 model.language_model.max_seq_length=50 model.data_dir=/home/TestData/nlp/retail/ model.validation_ds.prefix=dev model.train_ds.batch_size=10 model.train_ds.num_samples=-1 model.language_model.do_lower_case=true'
sh 'rm -rf examples/nlp/text_classification/outputs'
}
}
@@ -243,7 +243,7 @@ pipeline {
failFast true
steps {
sh 'cd examples/nlp/token_classification && python ner.py \
- model.data_dir=/home/TestData/nlp/token_classification_punctuation/ +pl.trainer.fast_dev_run=true \
+ model.data_dir=/home/TestData/nlp/token_classification_punctuation/ +trainer.fast_dev_run=true \
model.use_cache=false'
}
}
@@ -257,8 +257,8 @@ pipeline {
failFast true
steps {
sh 'cd examples/nlp/token_classification && python punctuation_capitalization.py \
- model.data_dir=/home/TestData/nlp/token_classification_punctuation/ +pl.trainer.fast_dev_run=true \
- pl.trainer.gpus=2 pl.trainer.distributed_backend=ddp \
+ model.data_dir=/home/TestData/nlp/token_classification_punctuation/ +trainer.fast_dev_run=true \
+ trainer.gpus=2 trainer.distributed_backend=ddp \
model.language_model.pretrained_model_name=distilbert-base-uncased \
model.use_cache=false'
}
