From 88f6574d0c6426432be92ad4caa451d290273d7b Mon Sep 17 00:00:00 2001
From: Sebastian Raschka
Date: Thu, 11 Apr 2024 14:26:53 -0400
Subject: [PATCH] Lightweight workflow to check for broken markdown links (#1271)

---
 .github/workflows/check-links.yml | 38 +++++++++++++++++++++++++++++++
 extensions/thunder/README.md      |  2 +-
 tutorials/prepare_dataset.md      |  2 +-
 tutorials/pretrain_tinyllama.md   |  8 ++-----
 4 files changed, 42 insertions(+), 8 deletions(-)
 create mode 100644 .github/workflows/check-links.yml

diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml
new file mode 100644
index 0000000000..0edf0589cc
--- /dev/null
+++ b/.github/workflows/check-links.yml
@@ -0,0 +1,38 @@
+name: Check Markdown Links
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  check-links:
+    runs-on: ubuntu-latest
+
+    steps:
+    - name: Checkout Repository
+      uses: actions/checkout@v3
+
+    - name: Install Markdown Link Checker
+      run: npm install -g markdown-link-check
+
+    - name: Create config for markdown link checker
+      run: |
+        echo '{
+          "projectBaseUrl":"${{ github.workspace }}",
+          "ignorePatterns": [
+            {
+              "pattern": "^#"
+            },
+            {
+              "pattern": "^https://falconllm.tii.ae"
+            }
+          ]
+        }' > $GITHUB_WORKSPACE/md_checker_config.json
+
+    - name: Find Markdown Files and Check Links
+      run: |
+        find . -name \*.md -print0 | xargs -0 -n1 markdown-link-check -c $GITHUB_WORKSPACE/md_checker_config.json
diff --git a/extensions/thunder/README.md b/extensions/thunder/README.md
index 7d15103475..715e7745fa 100644
--- a/extensions/thunder/README.md
+++ b/extensions/thunder/README.md
@@ -532,7 +532,7 @@ def backward_fn(saved_for_backward, cotangents):
   t763 = unsloth_apply_rope_backward(t757, t21, t22, 1, 8, 4)  # t763: "cuda:0 f32[2, 4, 3, 16]"
 ```
 
-We provide a specific [pre-training script copy](unsloth/pretrain.py) that uses this executor.
+We provide a specific [pre-training script copy](pretrain.py) that uses this executor.
 Given the Unsloth results below, these hand-written kernels do not seem to be worth it, showcasing the power of automated fusion compilers like [NvFuser](https://github.com/NVIDIA/Fuser).
 
 ## Examples and benchmarks
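For reviewers who want to reproduce the new CI check locally before pushing, the commands below are a minimal sketch that simply mirrors the workflow's install and check steps; it assumes Node.js/npm is available and that the same config JSON shown above has been saved as `md_checker_config.json` in the repository root (the exact filename/location is an assumption for local use).

```bash
# Install the link checker globally (same as the workflow's install step)
npm install -g markdown-link-check

# Check every Markdown file, reusing the config the workflow writes
# (here assumed to be saved locally as ./md_checker_config.json)
find . -name \*.md -print0 | xargs -0 -n1 markdown-link-check -c md_checker_config.json
```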
diff --git a/tutorials/prepare_dataset.md b/tutorials/prepare_dataset.md
index b6f3204b10..867b612f91 100644
--- a/tutorials/prepare_dataset.md
+++ b/tutorials/prepare_dataset.md
@@ -6,7 +6,7 @@ Below is a table of all datasets that are currently supported in LitGPT:
 |--------------|-------------|---------------------|--------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | Alpaca       | Finetuning  | 51,759 samples      | [URL](https://github.com/tatsu-lab/stanford_alpaca)                                   | [URL](https://crfm.stanford.edu/2023/03/13/alpaca.html)                                                                     | Attribution-NonCommercial 4.0 International, [URL](https://crfm.stanford.edu/2023/03/13/alpaca.html)                                                                                                               |
 | Alpaca-2k    | Finetuning  | 2000 samples        | [URL](https://huggingface.co/datasets/mhenrichsen/alpaca_2k_test)                     | See Alpaca above                                                                                                            | See Alpaca Above                                                                                                                                                                                                   |
-| Alpaca-GPT4  | Finetuning  | 52,002 samples      | [URL](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)                     | [URL](https://arxiv.org/abs/2304.03277)                                                                                     | Attribution-NonCommercial 4.0 International, [URL](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM/blob/main/DATA_LICENSEl)                                                                             |
+| Alpaca-GPT4  | Finetuning  | 52,002 samples      | [URL](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)                     | [URL](https://arxiv.org/abs/2304.03277)                                                                                     | Attribution-NonCommercial 4.0 International, [URL](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM/blob/main/DATA_LICENSE)                                                                              |
 | Alpaca Libre | Finetuning  | 55,370 samples      | [URL](https://github.com/mobarski/alpaca-libre)                                       | -                                                                                                                           | CC0/MIT, [URL](https://github.com/mobarski/alpaca-libre)                                                                                                                                                           |
 | Deita        | Finetuning  | 9,500 samples       | [URL](https://huggingface.co/datasets/HuggingFaceH4/deita-10k-v0-sft/tree/main/data)  | [URL](https://arxiv.org/abs/2312.15685)                                                                                     | MIT [URL](https://huggingface.co/datasets/hkust-nlp/deita-10k-v0/blob/main/README.md)                                                                                                                              |
 | Dolly        | Finetuning  | 15,011 samples      | [URL](https://github.com/databrickslabs/dolly/tree/master/data)                       | [URL](https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm)                | CC-BY-SA, [URL](https://github.com/databrickslabs/dolly#model-overview)                                                                                                                                            |
diff --git a/tutorials/pretrain_tinyllama.md b/tutorials/pretrain_tinyllama.md
index f4976ee097..8b27fb147f 100644
--- a/tutorials/pretrain_tinyllama.md
+++ b/tutorials/pretrain_tinyllama.md
@@ -118,11 +118,7 @@ or change the model type and size by passing a different string to the model nam
 litgpt pretrain --model_name Gemma-2b
 ```
 
-The currently supported model names are contained in the [config.py](https://github.com/Lightning-AI/litgpt/litgpt/config.py) file.
-You can
-
-1) either search this file for lines containing "name =",
-2) or run `litgpt download` without additional command line arguments
+The currently supported model names can be listed by executing `litgpt pretrain` without any additional arguments.
 
 Keep in mind that training with a single machine will take weeks. To speed up the process, you'll need access to a cluster.
 Once you're in a cluster, you can follow [these instructions](https://lightning.ai/docs/fabric/stable/fundamentals/launch.html#launch-on-a-cluster)
@@ -190,4 +186,4 @@ The following [Lightning Studio](https://lightning.ai/lightning-ai/studios) temp
 |-------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|
 | [Prepare the TinyLlama 1T token dataset](https://lightning.ai/lightning-ai/studios/prepare-the-tinyllama-1t-token-dataset)     | [Pretrain LLMs - TinyLlama 1.1B](https://lightning.ai/lightning-ai/studios/pretrain-llms-tinyllama-1-1b)  |
 | [Continued Pretraining with TinyLlama 1.1B](https://lightning.ai/lightning-ai/studios/continued-pretraining-with-tinyllama-1-1b) | |
-| |
\ No newline at end of file
+| |
\ No newline at end of file
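As a usage note on the `tutorials/pretrain_tinyllama.md` change above: the updated text points users to `litgpt pretrain` itself for the list of supported model names instead of the config file. A minimal sketch, assuming LitGPT is installed:

```bash
# Print the currently supported model names (run without additional arguments,
# per the updated tutorial text)
litgpt pretrain

# Then start pretraining with one of the listed names, e.g. (from the tutorial):
litgpt pretrain --model_name Gemma-2b
```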