From c3b7d333ffc3bad26cf1ea0ebfd52ce761ae3059 Mon Sep 17 00:00:00 2001
From: fayejf <36722593+fayejf@users.noreply.github.com>
Date: Thu, 12 May 2022 10:32:28 -0700
Subject: [PATCH] notebooks' link, typo and import fix (#4158)

* redo missing pr 4007

Signed-off-by: fayejf

* remove extremely unreliable links

Signed-off-by: fayejf
---
 .../asr/Offline_ASR_with_VAD_for_CTC_models.ipynb  |  8 ++++----
 tutorials/asr/Speech_Commands.ipynb                | 10 +++-------
 tutorials/asr/Voice_Activity_Detection.ipynb       |  6 +++---
 .../Speaker_Identification_Verification.ipynb      | 10 +++-------
 4 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb
index af6c8ffc477e..61675ec37e87 100644
--- a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb
+++ b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb
@@ -43,7 +43,7 @@
     "import torch\n",
     "import os\n",
     "from nemo.collections.asr.metrics.wer import word_error_rate\n",
-    "from nemo.collections.asr.parts.utils.vad_utils import stitch_segmented_asr_output, contruct_manfiest_eval"
+    "from nemo.collections.asr.parts.utils.vad_utils import stitch_segmented_asr_output, construct_manifest_eval"
    ]
   },
   {
@@ -320,7 +320,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "If we have ground-truth 'text' in input_manifest, we can evaluate our performance of stitched output. Let's align the 'text' in input manifest and 'pred_text' in stitched segmented asr output first, since some samples from input_manfiest might be pure noise and have been removed in VAD output and excluded for ASR inference. "
+    "If we have ground-truth 'text' in input_manifest, we can evaluate the performance of the stitched output. Let's first align the 'text' in the input manifest with the 'pred_text' in the stitched segmented ASR output, since some samples from input_manifest might be pure noise that was removed in the VAD output and therefore excluded from ASR inference. "
" ] }, { @@ -329,7 +329,7 @@ "metadata": {}, "outputs": [], "source": [ - "aligned_vad_asr_output_manifest = contruct_manfiest_eval(input_manifest, stitched_output_manifest)" + "aligned_vad_asr_output_manifest = construct_manifest_eval(input_manifest, stitched_output_manifest)" ] }, { @@ -386,4 +386,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tutorials/asr/Speech_Commands.ipynb b/tutorials/asr/Speech_Commands.ipynb index fc40552aca1c..13c37c33455a 100644 --- a/tutorials/asr/Speech_Commands.ipynb +++ b/tutorials/asr/Speech_Commands.ipynb @@ -643,17 +643,13 @@ "\n", "We can dramatically improve the time taken to train this model by using Multi GPU training along with Mixed Precision.\n", "\n", - "For multi-GPU training, take a look at [the PyTorch Lightning Multi-GPU training section](https://pytorch-lightning.readthedocs.io/en/latest/advanced/multi_gpu.html)\n", - "\n", - "For mixed-precision training, take a look at [the PyTorch Lightning Mixed-Precision training section](https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html#mixed-precision-16-bit-training)\n", - "\n", "```python\n", - "# Mixed precision:\n", - "trainer = Trainer(amp_level='O1', precision=16)\n", - "\n", "# Trainer with a distributed backend:\n", "trainer = Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n", "\n", + "# Mixed precision:\n", + "trainer = Trainer(amp_level='O1', precision=16)\n", + "\n", "# Of course, you can combine these flags as well.\n", "```" ] diff --git a/tutorials/asr/Voice_Activity_Detection.ipynb b/tutorials/asr/Voice_Activity_Detection.ipynb index 19a687e0b217..3c7b848c6d5e 100644 --- a/tutorials/asr/Voice_Activity_Detection.ipynb +++ b/tutorials/asr/Voice_Activity_Detection.ipynb @@ -657,12 +657,12 @@ "We can dramatically improve the time taken to train this model by using Multi GPU training along with Mixed Precision.\n", "\n", "```python\n", - "# Mixed precision:\n", - "trainer = Trainer(amp_level='O1', precision=16)\n", - "\n", "# Trainer with a distributed backend:\n", "trainer = Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n", "\n", + "# Mixed precision:\n", + "trainer = Trainer(amp_level='O1', precision=16)\n", + "\n", "# Of course, you can combine these flags as well.\n", "```" ] diff --git a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb index 5e5b5c9fd4ba..f2d0a45327a2 100644 --- a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb +++ b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb @@ -628,18 +628,14 @@ "## For Faster Training\n", "We can dramatically improve the time taken to train this model by using Multi GPU training along with Mixed Precision.\n", "\n", - "For multi-GPU training, take a look at the [PyTorch Lightning Multi-GPU training section](https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html)\n", - "\n", - "For mixed-precision training, take a look at the [PyTorch Lightning Mixed-Precision training section](https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html#mixed-precision-16-bit-training)\n", + "### Trainer with a distributed backend:\n", + "
+                "<pre><code>trainer = Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n",
+                "</code></pre>\n",
\n", "\n", "### Mixed precision:\n", "
                 "<pre><code>trainer = Trainer(amp_level='O1', precision=16)\n",
                 "
\n", "\n", - "### Trainer with a distributed backend:\n", - "
-                "<pre><code>trainer = Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n",
-                "</code></pre>\n",
\n", - "\n", "Of course, you can combine these flags as well." ] },