diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml
index c3e90bc18ad8..44b409ea7e1f 100644
--- a/.github/workflows/auto-merge.yml
+++ b/.github/workflows/auto-merge.yml
@@ -1,7 +1,7 @@
name: auto-merge
on:
- pull_request:
+ pull_request_target:
branches:
- main
@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
if: github.actor == 'dependabot[bot]'
steps:
- - uses: ahmadnassri/action-dependabot-auto-merge@v2.3
+ - uses: ahmadnassri/action-dependabot-auto-merge@v2.4
with:
github-token: ${{ secrets.AUTOMERGE_TOKEN }}
command: "squash and merge"
diff --git a/.github/workflows/content-origin-request.yml b/.github/workflows/content-origin-request.yml
new file mode 100644
index 000000000000..d5637420608b
--- /dev/null
+++ b/.github/workflows/content-origin-request.yml
@@ -0,0 +1,62 @@
+# This starts up a simulator that tries to do what our Lambda@Edge does.
+
+name: content-origin-request
+
+on:
+ pull_request:
+ branches:
+ - main
+ paths:
+ - deployer/aws-lambda/**
+ - libs/**
+ - .github/workflows/content-origin-request.yml
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup Node.js environment
+ uses: actions/setup-node@v2.1.5
+ with:
+ node-version: "12"
+
+ - name: Cache node_modules
+ uses: actions/cache@v2.1.4
+ id: cached-node_modules
+ with:
+ path: |
+ deployer/aws-lambda/content-origin-request/node_modules
+ key: ${{ runner.os }}-${{ hashFiles('deployer/aws-lambda/content-origin-request/yarn.lock') }}-${{ hashFiles('libs/**/*.js') }}
+
+ - name: Install all yarn packages
+ if: steps.cached-node_modules.outputs.cache-hit != 'true'
+ working-directory: deployer/aws-lambda/content-origin-request
+ run: yarn --frozen-lockfile
+
+ - name: Run test server
+ working-directory: deployer/aws-lambda/content-origin-request
+ run: |
+ yarn serve > /tmp/stdout.log 2> /tmp/stderr.log &
+
+ - name: Check that the server started
+ run: curl --retry-connrefused --retry 5 -I http://localhost:7000/ping
+
+ - name: Preflight the integration tests
+ run: |
+ curl -I http://localhost:7000/docs/Web
+ curl -I http://localhost:7000/en-US/docs/Web/
+
+ - name: Unit test
+ working-directory: deployer/aws-lambda/content-origin-request
+ run: |
+ yarn test-server
+
+ - name: Debug any server outputs
+ run: |
+ echo "____STDOUT____"
+ cat /tmp/stdout.log
+ echo "____STDERR____"
+ cat /tmp/stderr.log
diff --git a/.github/workflows/dev-build.yml b/.github/workflows/dev-build.yml
index e9dc381a6025..f38427d16951 100644
--- a/.github/workflows/dev-build.yml
+++ b/.github/workflows/dev-build.yml
@@ -21,10 +21,6 @@ on:
description: "Build archived content"
required: false
default: "false"
- translated_content:
- description: "Build translated content"
- required: false
- default: "true"
# This is very useful when combined with the "Use workflow from"
# feature that is built into the "Run workflow" button on
@@ -66,13 +62,12 @@ jobs:
path: mdn/archived-content
- uses: actions/checkout@v2
- if: "contains(github.event.inputs.translated_content, 'true')"
with:
- repository: mdn/translated-content-rendered
+ repository: mdn/translated-content
path: mdn/translated-content
- name: Setup Node.js environment
- uses: actions/setup-node@v2.1.4
+ uses: actions/setup-node@v2.1.5
with:
node-version: "12"
@@ -96,7 +91,7 @@ jobs:
python-version: "3.8"
- name: Install Python poetry
- uses: snok/install-poetry@v1.1.1
+ uses: snok/install-poetry@v1.1.2
- name: Install deployer
run: |
@@ -112,7 +107,6 @@ jobs:
run: |
echo "notes: ${{ github.event.inputs.notes }}"
echo "archived_content: ${{ github.event.inputs.archived_content }}"
- echo "translated_content: ${{ github.event.inputs.translated_content }}"
echo "log_each_successful_upload: ${{ github.event.inputs.log_each_successful_upload }}"
echo "deployment_prefix: ${{ github.event.inputs.deployment_prefix }}"
@@ -121,6 +115,7 @@ jobs:
# Remember, the mdn/content repo got cloned into `pwd` into a
# sub-folder called "mdn/content"
CONTENT_ROOT: ${{ github.workspace }}/mdn/content/files
+ CONTENT_TRANSLATED_ROOT: ${{ github.workspace }}/mdn/translated-content/files
# This basically means that all live-sample iframes run on the same
# host as the page that includes the iframe. Not great security but the
@@ -152,12 +147,6 @@ jobs:
else
echo "Will NOT build mdn/archived-content too"
fi
- if [ ${{ github.event.inputs.translated_content }} == "true" ]; then
- echo "Will build mdn/translated-content too"
- export CONTENT_TRANSLATED_ROOT=${{ github.workspace }}/mdn/translated-content/files
- else
- echo "Will NOT build mdn/translated-content too"
- fi
# Info about which CONTENT_* environment variables were set and to what.
echo "CONTENT_ROOT=$CONTENT_ROOT"
@@ -165,6 +154,7 @@ jobs:
echo "CONTENT_TRANSLATED_ROOT=$CONTENT_TRANSLATED_ROOT"
yarn prepare-build
+ yarn tool sync-translated-content
yarn build
# TODO: When the deployer is available this is where we
@@ -175,6 +165,7 @@ jobs:
env:
# Set the CONTENT_ROOT first
CONTENT_ROOT: ${{ github.workspace }}/mdn/content/files
+ CONTENT_TRANSLATED_ROOT: ${{ github.workspace }}/mdn/translated-content/files
DEPLOYER_BUCKET_NAME: mdn-content-dev
DEPLOYER_BUCKET_PREFIX: ${{ github.event.inputs.deployment_prefix }}
@@ -209,10 +200,11 @@ jobs:
poetry run deployer whatsdeployed --output ../client/build/_whatsdeployed/code.json
poetry run deployer whatsdeployed --output ../client/build/_whatsdeployed/content.json $CONTENT_ROOT
+ poetry run deployer whatsdeployed --output ../client/build/_whatsdeployed/translated-content.json $CONTENT_TRANSLATED_ROOT
# XXX would be nice to validate here that $DEPLOYER_BUCKET_PREFIX is truthy
echo "DEPLOYER_BUCKET_PREFIX=$DEPLOYER_BUCKET_PREFIX"
- poetry run deployer upload ../client/build
+ poetry run deployer upload --prune ../client/build
poetry run deployer update-lambda-functions ./aws-lambda
# TODO
# Execute command to tell the Dev CloudFront distribution to use the
diff --git a/.github/workflows/developing.yml b/.github/workflows/developing.yml
index e8a308c1eec9..d3ffdfb494e5 100644
--- a/.github/workflows/developing.yml
+++ b/.github/workflows/developing.yml
@@ -16,7 +16,7 @@ jobs:
path: mdn/content
- name: Setup Node.js environment
- uses: actions/setup-node@v2.1.4
+ uses: actions/setup-node@v2.1.5
with:
node-version: "12"
@@ -62,7 +62,7 @@ jobs:
- name: Wait for servers
run: |
# Just a slight delay to wait until the dev server is ready.
- sleep 5
+ sleep 3
curl --retry-connrefused --retry 5 http://localhost:5000 > /dev/null
curl --retry-connrefused --retry 5 --silent http://localhost:3000 > /dev/null
@@ -76,15 +76,13 @@ jobs:
# of the yarn installs above
PUPPETEER_EXECUTABLE_PATH: /usr/bin/google-chrome
run: |
- status=0
- yarn test:testing developing || (
- status=$?
- echo "Testing failed! Going to dump stdout and stderr"
- echo "STDOUT..................................................."
- cat /tmp/stdout.log
- echo "STDERR..................................................."
- cat /tmp/stderr.log
- echo $status
- exit $status
- )
- exit $status
+ yarn test:testing developing
+
+ - name: Debug server's stdout and stderr if tests failed
+ if: failure()
+ run: |
+ echo "STDOUT..................................................."
+ cat /tmp/stdout.log
+ echo ""
+ echo "STDERR..................................................."
+ cat /tmp/stderr.log
diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml
index daaed940d367..68570660e866 100644
--- a/.github/workflows/npm-publish.yml
+++ b/.github/workflows/npm-publish.yml
@@ -21,7 +21,7 @@ jobs:
fetch-depth: 10
- name: Setup Node.js environment
- uses: actions/setup-node@v2.1.4
+ uses: actions/setup-node@v2.1.5
with:
node-version: "12"
diff --git a/.github/workflows/npm-published-simulation.yml b/.github/workflows/npm-published-simulation.yml
new file mode 100644
index 000000000000..ca13f0914828
--- /dev/null
+++ b/.github/workflows/npm-published-simulation.yml
@@ -0,0 +1,113 @@
+# Instead of waiting for Yari to be published to npmjs.com and be upgraded
+# inside mdn/content by Dependabot, we do all those steps here using `npm pack`.
+
+name: NPM Publish simulation
+
+on:
+ pull_request:
+ branches:
+ - main
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - uses: actions/checkout@v2
+ with:
+ repository: mdn/content
+ path: mdn/content
+
+ - name: Setup Node.js environment
+ uses: actions/setup-node@v2.1.5
+ with:
+ node-version: "12"
+
+ - name: Cache node_modules
+ uses: actions/cache@v2.1.4
+ id: cached-node_modules
+ with:
+ path: |
+ node_modules
+ key: ${{ runner.os }}-${{ hashFiles('yarn.lock') }}
+
+ - name: Install all yarn packages
+ if: steps.cached-node_modules.outputs.cache-hit != 'true'
+ env:
+ PUPPETEER_SKIP_CHROMIUM_DOWNLOAD: 1
+ run: yarn --frozen-lockfile
+
+ - name: Setup kernel for react native, increase watchers
+ run: |
+ # When running Yari on Linux, you might get the
+ # "Error: ENOSPC: System limit for number of file watchers reached" error.
+ # This, resolves that.
+ # Source https://github.com/expo/expo-github-action/issues/20#issuecomment-541676895
+ echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
+
+ - name: Prepare to build
+ env:
+ # The following env vars match what we do in npm-publish.yml
+ # Each variable set is documented there.
+
+ REACT_APP_CRUD_MODE: true
+ REACT_APP_DISABLE_AUTH: true
+ CONTENT_ROOT: testing/content/files
+ run: |
+ yarn prepare-build
+
+ - name: Build and install tarball
+ run: |
+ echo mdn/content/ >> .npmignore
+ npm pack
+ TARBALL=`ls mdn-yari-*.tgz`
+ echo $TARBALL
+ ls -lh $TARBALL
+ mv $TARBALL mdn/content/
+ cd mdn/content
+ yarn add file:$TARBALL
+
+ - name: Start Yari from mock content repo
+ working-directory: mdn/content
+ run: |
+ yarn start > /tmp/stdout.log 2> /tmp/stderr.log &
+
+ - name: View some URLs on localhost:5000
+ run: |
+ curl --retry-connrefused --retry 5 -I http://localhost:5000
+
+ # Basically, test if it 200 OKs. If not, this'll exit non-zero.
+ curl http://localhost:5000/en-US/ > /dev/null
+ curl http://localhost:5000/en-US/docs/MDN/Kitchensink > /dev/null
+
+ - name: Test viewing the dev server
+ env:
+ # This will make sure the tests in `testing/tests/*.test.js` only run
+ # if the development server is up and ready to be tested.
+ TESTING_DEVELOPING: true
+ # Use local chrome installs since we skip downloading it as part
+ # of the yarn installs above
+ PUPPETEER_EXECUTABLE_PATH: /usr/bin/google-chrome
+ # When running Yari from within mdn/content it only starts 1 server;
+ # the one on localhost:5000. No React dev server; the one
+ # on localhost:3000.
+ # Testing that dev server is not relevant or important in this context.
+ DEVELOPING_SKIP_DEV_URL: true
+ run: |
+ yarn test:testing developing
+
+ - name: Debug server's stdout and stderr if tests failed
+ if: failure()
+ run: |
+ echo "STDOUT..................................................."
+ cat /tmp/stdout.log
+ echo ""
+ echo "STDERR..................................................."
+ cat /tmp/stderr.log
+
+ - name: SSR build a page
+ working-directory: mdn/content
+ run: |
+ yarn build files/en-us/mdn/kitchensink/index.html
diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml
index 4404aac99750..6614649667ea 100644
--- a/.github/workflows/performance.yml
+++ b/.github/workflows/performance.yml
@@ -2,6 +2,11 @@ name: Performance
on:
pull_request:
+ paths:
+ - client/src/**
+ - .github/workflows/performance.yml
+ - package.json
+ - yarn.lock
jobs:
lighthouse:
@@ -20,7 +25,7 @@ jobs:
path: mdn/content
- name: Setup Node.js environment
- uses: actions/setup-node@v2.1.4
+ uses: actions/setup-node@v2.1.5
with:
node-version: "12"
diff --git a/.github/workflows/pr-deployer.yml b/.github/workflows/pr-deployer.yml
index c5570af173b2..fe8543e52715 100644
--- a/.github/workflows/pr-deployer.yml
+++ b/.github/workflows/pr-deployer.yml
@@ -19,7 +19,7 @@ jobs:
python-version: "3.8"
- name: Install Python poetry
- uses: snok/install-poetry@v1.1.1
+ uses: snok/install-poetry@v1.1.2
with:
virtualenvs-create: true
virtualenvs-in-project: true
diff --git a/.github/workflows/pr-docs.yml b/.github/workflows/pr-docs.yml
index 5851f53a70da..2225ed617504 100644
--- a/.github/workflows/pr-docs.yml
+++ b/.github/workflows/pr-docs.yml
@@ -14,7 +14,7 @@ jobs:
- uses: actions/checkout@v2
- name: Setup Node.js environment
- uses: actions/setup-node@v2.1.4
+ uses: actions/setup-node@v2.1.5
with:
node-version: "12"
diff --git a/.github/workflows/pr-kumascript.yml b/.github/workflows/pr-kumascript.yml
index 29b0928e0333..abd5f076fc76 100644
--- a/.github/workflows/pr-kumascript.yml
+++ b/.github/workflows/pr-kumascript.yml
@@ -24,7 +24,7 @@ jobs:
- uses: actions/checkout@v2
- name: Setup Node.js environment
- uses: actions/setup-node@v2.1.4
+ uses: actions/setup-node@v2.1.5
with:
node-version: "12"
diff --git a/.github/workflows/prod-build.yml b/.github/workflows/prod-build.yml
index fd39b54d66c4..faf02fdee7b6 100644
--- a/.github/workflows/prod-build.yml
+++ b/.github/workflows/prod-build.yml
@@ -13,7 +13,6 @@ env:
DEFAULT_DEPLOYMENT_PREFIX: "main"
DEFAULT_NOTES: ""
DEFAULT_ARCHIVED_CONTENT: "false"
- DEFAULT_TRANSLATED_CONTENT: "true"
DEFAULT_LOG_EACH_SUCCESSFUL_UPLOAD: "false"
on:
@@ -31,10 +30,6 @@ on:
description: "Build archived content"
required: false
default: ${DEFAULT_ARCHIVED_CONTENT}
- translated_content:
- description: "Build translated content"
- required: false
- default: ${DEFAULT_TRANSLATED_CONTENT}
# This is very useful when combined with the "Use workflow from"
# feature that is built into the "Run workflow" button on
@@ -78,7 +73,6 @@ jobs:
# can refer to later in `if: ....` lines or in bash with the `run: ...` blocks.
- name: Merge dispatch inputs with default env vars
run: |
- echo "BUILD_TRANSLATED_CONTENT=${{ github.event.inputs.translated_content || env.DEFAULT_TRANSLATED_CONTENT }}" >> $GITHUB_ENV
echo "BUILD_ARCHIVED_CONTENT=${{ github.event.inputs.archived_content || env.DEFAULT_ARCHIVED_CONTENT }}" >> $GITHUB_ENV
echo "DEPLOYER_BUCKET_PREFIX=${{ github.event.inputs.deployment_prefix || env.DEFAULT_DEPLOYMENT_PREFIX }}" >> $GITHUB_ENV
echo "DEPLOYER_LOG_EACH_SUCCESSFUL_UPLOAD=${{ github.event.inputs.log_each_successful_upload || env.DEFAULT_LOG_EACH_SUCCESSFUL_UPLOAD }}" >> $GITHUB_ENV
@@ -90,13 +84,12 @@ jobs:
path: mdn/archived-content
- uses: actions/checkout@v2
- if: "contains(env.BUILD_TRANSLATED_CONTENT, 'true')"
with:
- repository: mdn/translated-content-rendered
+ repository: mdn/translated-content
path: mdn/translated-content
- name: Setup Node.js environment
- uses: actions/setup-node@v2.1.4
+ uses: actions/setup-node@v2.1.5
with:
node-version: "12"
@@ -120,7 +113,7 @@ jobs:
python-version: "3.8"
- name: Install Python poetry
- uses: snok/install-poetry@v1.1.1
+ uses: snok/install-poetry@v1.1.2
- name: Install deployer
run: |
@@ -136,10 +129,8 @@ jobs:
run: |
echo "notes: ${{ github.event.inputs.notes || env.DEFAULT_NOTES }}"
echo "archived_content: ${{ github.event.inputs.archived_content || env.DEFAULT_ARCHIVED_CONTENT }}"
- echo "translated_content: ${{ github.event.inputs.translated_content || env.DEFAULT_TRANSLATED_CONTENT }}"
echo "log_each_successful_upload: ${{ github.event.inputs.log_each_successful_upload || env.DEFAULT_LOG_EACH_SUCCESSFUL_UPLOAD }}"
echo "deployment_prefix: ${{ github.event.inputs.deployment_prefix || env.DEFAULT_DEPLOYMENT_PREFIX }}"
- echo "BUILD_TRANSLATED_CONTENT: ${{ env.BUILD_TRANSLATED_CONTENT }}"
echo "BUILD_ARCHIVED_CONTENT: ${{ env.BUILD_ARCHIVED_CONTENT }}"
- name: Build everything
@@ -147,6 +138,7 @@ jobs:
# Remember, the mdn/content repo got cloned into `pwd` into a
# sub-folder called "mdn/content"
CONTENT_ROOT: ${{ github.workspace }}/mdn/content/files
+ CONTENT_TRANSLATED_ROOT: ${{ github.workspace }}/mdn/translated-content/files
# The default for this environment variable is geared for writers
# (aka. local development). Usually defaults are supposed to be for
@@ -174,12 +166,6 @@ jobs:
else
echo "Will NOT build mdn/archived-content too"
fi
- if [ ${{ env.BUILD_TRANSLATED_CONTENT }} == "true" ]; then
- echo "Will build mdn/translated-content too"
- export CONTENT_TRANSLATED_ROOT=${{ github.workspace }}/mdn/translated-content/files
- else
- echo "Will NOT build mdn/translated-content too"
- fi
# Info about which CONTENT_* environment variables were set and to what.
echo "CONTENT_ROOT=$CONTENT_ROOT"
@@ -187,6 +173,7 @@ jobs:
echo "CONTENT_TRANSLATED_ROOT=$CONTENT_TRANSLATED_ROOT"
yarn prepare-build
+ yarn tool sync-translated-content
yarn build
du -sh client/build
@@ -199,6 +186,7 @@ jobs:
# Set the CONTENT_ROOT first
CONTENT_ROOT: ${{ github.workspace }}/mdn/content/files
+ CONTENT_TRANSLATED_ROOT: ${{ github.workspace }}/mdn/translated-content/files
DEPLOYER_BUCKET_NAME: mdn-content-prod
@@ -222,12 +210,6 @@ jobs:
else
echo "Will NOT build mdn/archived-content too"
fi
- if [ ${{ env.BUILD_TRANSLATED_CONTENT }} == "true" ]; then
- echo "Will build mdn/translated-content too"
- export CONTENT_TRANSLATED_ROOT=${{ github.workspace }}/mdn/translated-content/files
- else
- echo "Will NOT build mdn/translated-content too"
- fi
# Info about which CONTENT_* environment variables were set and to what.
echo "CONTENT_ROOT=$CONTENT_ROOT"
@@ -238,6 +220,7 @@ jobs:
poetry run deployer whatsdeployed --output ../client/build/_whatsdeployed/code.json
poetry run deployer whatsdeployed --output ../client/build/_whatsdeployed/content.json $CONTENT_ROOT
+ poetry run deployer whatsdeployed --output ../client/build/_whatsdeployed/translated-content.json $CONTENT_TRANSLATED_ROOT
# XXX would be nice to validate here that $DEPLOYER_BUCKET_PREFIX is truthy
echo "DEPLOYER_BUCKET_PREFIX=$DEPLOYER_BUCKET_PREFIX"
diff --git a/.github/workflows/stage-build.yml b/.github/workflows/stage-build.yml
index 6335034f50bd..527987bd1184 100644
--- a/.github/workflows/stage-build.yml
+++ b/.github/workflows/stage-build.yml
@@ -13,7 +13,6 @@ env:
DEFAULT_DEPLOYMENT_PREFIX: "main"
DEFAULT_NOTES: ""
DEFAULT_ARCHIVED_CONTENT: "false"
- DEFAULT_TRANSLATED_CONTENT: "true"
DEFAULT_LOG_EACH_SUCCESSFUL_UPLOAD: "false"
on:
@@ -31,10 +30,6 @@ on:
description: "Build archived content"
required: false
default: ${DEFAULT_ARCHIVED_CONTENT}
- translated_content:
- description: "Build translated content"
- required: false
- default: ${DEFAULT_TRANSLATED_CONTENT}
# This is very useful when combined with the "Use workflow from"
# feature that is built into the "Run workflow" button on
@@ -78,7 +73,6 @@ jobs:
# can refer to later in `if: ....` lines or in bash with the `run: ...` blocks.
- name: Merge dispatch inputs with default env vars
run: |
- echo "BUILD_TRANSLATED_CONTENT=${{ github.event.inputs.translated_content || env.DEFAULT_TRANSLATED_CONTENT }}" >> $GITHUB_ENV
echo "BUILD_ARCHIVED_CONTENT=${{ github.event.inputs.archived_content || env.DEFAULT_ARCHIVED_CONTENT }}" >> $GITHUB_ENV
echo "DEPLOYER_BUCKET_PREFIX=${{ github.event.inputs.deployment_prefix || env.DEFAULT_DEPLOYMENT_PREFIX }}" >> $GITHUB_ENV
echo "DEPLOYER_LOG_EACH_SUCCESSFUL_UPLOAD=${{ github.event.inputs.log_each_successful_upload || env.DEFAULT_LOG_EACH_SUCCESSFUL_UPLOAD }}" >> $GITHUB_ENV
@@ -90,13 +84,12 @@ jobs:
path: mdn/archived-content
- uses: actions/checkout@v2
- if: "contains(env.BUILD_TRANSLATED_CONTENT, 'true')"
with:
- repository: mdn/translated-content-rendered
+ repository: mdn/translated-content
path: mdn/translated-content
- name: Setup Node.js environment
- uses: actions/setup-node@v2.1.4
+ uses: actions/setup-node@v2.1.5
with:
node-version: "12"
@@ -120,7 +113,7 @@ jobs:
python-version: "3.8"
- name: Install Python poetry
- uses: snok/install-poetry@v1.1.1
+ uses: snok/install-poetry@v1.1.2
- name: Install deployer
run: |
@@ -136,10 +129,8 @@ jobs:
run: |
echo "notes: ${{ github.event.inputs.notes || env.DEFAULT_NOTES }}"
echo "archived_content: ${{ github.event.inputs.archived_content || env.DEFAULT_ARCHIVED_CONTENT }}"
- echo "translated_content: ${{ github.event.inputs.translated_content || env.DEFAULT_TRANSLATED_CONTENT }}"
echo "log_each_successful_upload: ${{ github.event.inputs.log_each_successful_upload || env.DEFAULT_LOG_EACH_SUCCESSFUL_UPLOAD }}"
echo "deployment_prefix: ${{ github.event.inputs.deployment_prefix || env.DEFAULT_DEPLOYMENT_PREFIX }}"
- echo "BUILD_TRANSLATED_CONTENT: ${{ env.BUILD_TRANSLATED_CONTENT }}"
echo "BUILD_ARCHIVED_CONTENT: ${{ env.BUILD_ARCHIVED_CONTENT }}"
- name: Build everything
@@ -147,6 +138,7 @@ jobs:
# Remember, the mdn/content repo got cloned into `pwd` into a
# sub-folder called "mdn/content"
CONTENT_ROOT: ${{ github.workspace }}/mdn/content/files
+ CONTENT_TRANSLATED_ROOT: ${{ github.workspace }}/mdn/translated-content/files
# The default for this environment variable is geared for writers
# (aka. local development). Usually defaults are supposed to be for
@@ -172,6 +164,14 @@ jobs:
# '' nomatter what
# kind of document it is.
BUILD_ALWAYS_NO_ROBOTS: true
+
+ # (peterbe, Mar 2021) This is unique to Stage and temporary. We want to
+ # test the new Yari-based sign-in and sign-up. Setting this environment
+ # variable changes the behavior of the "Sign in" link in the top navbar.
+ # Once we know for sure that this new Yari sign in/up is working in Stage
+ # we'll make this the new default and we can remove these lines.
+ REACT_APP_USE_YARI_SIGNIN: true
+
run: |
if [ ${{ env.BUILD_ARCHIVED_CONTENT }} == "true" ]; then
echo "Will build mdn/archived-content too"
@@ -179,12 +179,6 @@ jobs:
else
echo "Will NOT build mdn/archived-content too"
fi
- if [ ${{ env.BUILD_TRANSLATED_CONTENT }} == "true" ]; then
- echo "Will build mdn/translated-content too"
- export CONTENT_TRANSLATED_ROOT=${{ github.workspace }}/mdn/translated-content/files
- else
- echo "Will NOT build mdn/translated-content too"
- fi
# Info about which CONTENT_* environment variables were set and to what.
echo "CONTENT_ROOT=$CONTENT_ROOT"
@@ -192,6 +186,7 @@ jobs:
echo "CONTENT_TRANSLATED_ROOT=$CONTENT_TRANSLATED_ROOT"
yarn prepare-build
+ yarn tool sync-translated-content
yarn build
du -sh client/build
@@ -204,6 +199,7 @@ jobs:
# Set the CONTENT_ROOT first
CONTENT_ROOT: ${{ github.workspace }}/mdn/content/files
+ CONTENT_TRANSLATED_ROOT: ${{ github.workspace }}/mdn/translated-content/files
DEPLOYER_BUCKET_NAME: mdn-content-stage
@@ -227,12 +223,6 @@ jobs:
else
echo "Will NOT build mdn/archived-content too"
fi
- if [ ${{ env.BUILD_TRANSLATED_CONTENT }} == "true" ]; then
- echo "Will build mdn/translated-content too"
- export CONTENT_TRANSLATED_ROOT=${{ github.workspace }}/mdn/translated-content/files
- else
- echo "Will NOT build mdn/translated-content too"
- fi
# Info about which CONTENT_* environment variables were set and to what.
echo "CONTENT_ROOT=$CONTENT_ROOT"
@@ -243,10 +233,11 @@ jobs:
poetry run deployer whatsdeployed --output ../client/build/_whatsdeployed/code.json
poetry run deployer whatsdeployed --output ../client/build/_whatsdeployed/content.json $CONTENT_ROOT
+ poetry run deployer whatsdeployed --output ../client/build/_whatsdeployed/translated-content.json $CONTENT_TRANSLATED_ROOT
# XXX would be nice to validate here that $DEPLOYER_BUCKET_PREFIX is truthy
echo "DEPLOYER_BUCKET_PREFIX=$DEPLOYER_BUCKET_PREFIX"
- poetry run deployer upload ../client/build
+ poetry run deployer upload --prune ../client/build
poetry run deployer update-lambda-functions ./aws-lambda
# TODO: Depending on how long the upload takes, consider switching to
diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index 2fa236187c00..398c9cc70d73 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -17,7 +17,7 @@ jobs:
- uses: actions/checkout@v2
- name: Setup Node.js environment
- uses: actions/setup-node@v2.1.4
+ uses: actions/setup-node@v2.1.5
with:
node-version: "12"
@@ -42,9 +42,6 @@ jobs:
- name: Lint ESLint
run: yarn eslint
- - name: Unit testing build
- run: yarn test:build
-
- name: Unit testing client
run: yarn test:client
diff --git a/.gitignore b/.gitignore
index 2ddde806867d..a5b2c0f65fe2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -72,5 +72,6 @@ mdn-yari-*.tgz
function.zip
testing/content/files/en-us/_githistory.json
+testing/translated-content/files/**/_githistory.json
# eslintcache
client/.eslintcache
diff --git a/README.md b/README.md
index c5f06bc3c83e..fd4d001b8390 100644
--- a/README.md
+++ b/README.md
@@ -5,52 +5,82 @@
## Quickstart
-Before you can begin with Yari, you need [Content](https://github.com/mdn/content).
-See its README which basically, says something like this:
+Development on `yari` involves updating the machinery that renders MDN content
+or improving the structure and styling of the MDN UI (e.g. the
+styling of the header). If you are more interested in contributing to the MDN
+content, you should check out the [content](https://github.com/mdn/content) repo
+README instead.
- git clone https://github.com/mdn/content.git mdn/content
+Before you can start working with Yari, you need to:
-Now, you just need to note where that folder is before you can start Yari.
+
+
-To run Yari locally, you'll first need to install [git](https://git-scm.com/),
-[Node.js](https://nodejs.org) (>= 12.0.0) and
-[Yarn 1](https://classic.yarnpkg.com/en/docs/install).
-After that, run these commands in your bash:
+1. Install [git](https://git-scm.com/),
+ [Node.js](https://nodejs.org) (>= 12.0.0), and [Yarn 1](https://classic.yarnpkg.com/en/docs/install).
+
+1. [Fork](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo)
+ the MDN [content](https://github.com/mdn/content) and [yari](https://github.com/mdn/yari)
+ repositories using the Fork button on GitHub.
+
+1. Clone the forked repositories to your computer using the following commands
+ (replace `[your_account]` with the account you forked the repositories to):
+
+ git clone https://github.com/[your_account]/content.git
+ git clone https://github.com/[your_account]/yari.git
+
+ Take a note of the file path to the location where you've cloned that
+ repo before moving on.
+
+
+
+To run Yari locally, you'll first need to install its dependencies and build the
+app locally. Do this like so:
- git clone https://github.com/mdn/yari.git
cd yari
- yarn
+ yarn install
+
+Now run the following command to create a `.env` file inside your `yari` repo
+root and set the `CONTENT_ROOT` environment variable equal to the path to the
+`content` repo. This is so the Yari app can find the content it needs to render.
+You'll need to replace `/path/to/mdn/content/files` with the path to the
+`/files` folder inside your clone of the `content` repo:
+
echo CONTENT_ROOT=/path/to/mdn/content/files >> .env
+
+At this point, you can get started. Run the following lines to compile required
+files, start the Yari web server running, and open it in your browser:
+
yarn dev
open http://localhost:3000
-Make sure you point to the `/files` folder inside your clone of the content
-repo.
+If you prefer you can use `yarn start`, which will re-use any previously
+compiled files; this is "riskier" but faster. `yarn dev` always ensures that
+everything is up-to-date.
-If you prefer, you can fork the repo first and do the `git clone` with
-_your_ fork instead of the `mdn` one.
-
-The `yarn dev` command will compile and prepare certain files. This always
-takes a little extra time. If you prefer you can use `yarn start` which
-will re-use any previously compiled files which is "riskier" but faster.
-The `yarn start` command will also start a server which doesn't automatically
-reload when its source code files change, so use with caution.
+The `yarn start` command also starts a server with slightly different behavior —
+it doesn't automatically reload when its source code files change,
+so use with caution.
See also our [reviewing guide](docs/REVIEWING.md) for information on how to
review Yari changes.
### How to stay up-to-date
-Periodically, the code and the content changes. Make sure you're staying
-up-to-date with these commands:
+Periodically, the code and the content changes. Make sure you stay
+up-to-date with something along the following lines (replace `yari-origin`
+with whatever you called [the remote location](https://git-scm.com/docs/git-remote)
+of the original yari repo):
- git pull origin main
+ git pull yari-origin main
yarn
yarn dev
-These are also good steps to always take when you embark on making a change.
-Then, the only extra command needed is `git checkout -b my-new-branch`
-(or however you prefer to create new `git` branches)
+When you embark on making a change, do it on a new branch, for example
+`git checkout -b my-new-branch`.
## License
@@ -61,9 +91,11 @@ in the [mdn/content repository](https://github.com/mdn/content).
## How it works
-Yari is multiple things but at its core is the MDN content as `index.html`
-files, in `git`, that contain the metadata (as front-matter) and
-the bulk of the document.
+Yari does a number of things, the most important of which is to render and serve
+the MDN content found in the [content repo](https://github.com/mdn/content).
+Each document is stored as an `index.html` file that contains metadata presented
+as YAML [front-matter](https://github.com/mdn/content#fundamental-concepts)
+followed by the document source.
The builder converts these "source files" into "build files" using a CLI tool
that iterates over the files, builds the HTML, and lastly packages it up
@@ -71,12 +103,6 @@ with the front-end code, ready to be served as static files.
## Development
-First of all, development on `yari` can mean the source code (e.g. the
-styling of the header) or it can mean the content, since it's all one
-repo. This document doesn't distinguish between the two. In the future we
-might expand with more documentation specifically for contributing to the
-content exclusively.
-
The `yarn start` command encapsulates the front-end dev server
(on ) and the `server` (on ).
@@ -87,14 +113,15 @@ if you want to work more rapidly.
If you configure an environment variable called `EDITOR`, either on your
system as a whole or in the root `.env` file, it can be used in the development
-server to link to sources which, when clicked, opens in
-your preferred editor/IDE. For example, in the root:
+server to link to sources which, when clicked, open in your preferred
+editor/IDE. For example, in the root of the repo you could run:
echo 'EDITOR=code' >> .env
Now clicking certain links will open files directly in the currently open
-VSCode IDE. To test it, view any document on and
-click the "Open in your editor" button.
+VS Code IDE (replace `code` in the above command with a different text editor
+name if needed, e.g. `atom` or whatever). To test it, view any document on
+ and click the "Open in your editor" button.
### How the server works
@@ -114,29 +141,30 @@ And conveniently, if you're not even interested in what the flaws were, run:
yarn prettier-format
-But automatically when you ran `yarn` the first time (`yarn` is an alias for
-`yarn install`) it set up a `git` pre-commit hook that uses `pretty-quick`
-which is a wrapper on `prettier` that checks only the files in the git
+When you ran `yarn` for the first time (`yarn` is an alias for
+`yarn install`) it automatically sets up a `git` pre-commit hook that uses
+`pretty-quick` — a wrapper for `prettier` that checks only the files in the git
commit.
-If in doubt about formatting, you can create a pull request and if you have
-formatting flaws, the pull request checks should catch it.
+If you have doubts about formatting, submit your pull request anyway. If you
+have formatting flaws, the [pull request checks](https://github.com/features/actions)
+should catch it.
### Upgrading Packages
We maintain the dependencies using `Dependabot` in GitHub but if you want
-to manually upgrade some you can use:
+to manually upgrade them you can use:
yarn upgrade-interactive --latest
### Sharing your dev environment with `ngrok`
-[`ngrok`](https://ngrok.com/) is a great tool for starting a HTTP proxy
-server from the Internet into your Yari server. This can be useful for testing
-your current build on external tools like BrowserStack, WebPageTest,
+[`ngrok`](https://ngrok.com/) allows you to start an HTTP proxy
+server from the web into your Yari server. This can be useful for testing
+your current build using external tools like BrowserStack, WebPageTest, or
Google Translate, or to simply show a friend what you're up to. Obviously
it'll never be faster than your uplink Internet connection but it should
-be fairly feature complete.
+be fairly feature-complete.
1. [Create in account on Ngrok.com](https://dashboard.ngrok.com/signup)
2. [Download the executable](https://ngrok.com/download)
@@ -146,7 +174,7 @@ be fairly feature complete.
This will display something like this:
Session Status online
- Account (Plan: Free)
+ Account (Plan: Free)
Version 2.3.35
Region United States (us)
Web Interface http://127.0.0.1:4040
@@ -154,17 +182,18 @@ This will display something like this:
Forwarding https://920ba2108da8.ngrok.io -> http://localhost:5000
Connections ttl opn rt1 rt5 p50 p90
- 0 0 0.00 0.00 0.00 0.00
+ 0 0 0.00 0.00 0.00 0.00
-Now, take that "Forwarding" URL `https://920ba2108da8.ngrok.io` (in this
+Now, take that "Forwarding" URL (`https://920ba2108da8.ngrok.io` in this
example) and share it.
## Building
The `server` builds content automatically (on-the-fly) when you're viewing
-pages. But if you want to you can pre-emptively build all the content
-in advance. One potential advantage is that you can get a more complete
-list of all possible "flaws" across all documents before you even visit them.
+pages, but you can pre-emptively build all the content in advance if desired.
+One potential advantage is that you can get a more complete list of all possible
+"flaws" across all documents before you even visit them.
+
The most fundamental CLI command is:
yarn build
@@ -173,8 +202,9 @@ The most fundamental CLI command is:
Every `index.html` becomes two files:
-- `index.html` fully formed and complete HTML file
-- `index.json` the React needed state to build the page in the client
+- `index.html` — a fully formed and complete HTML file
+- `index.json` — the state information React needs to build the page in the
+ client
### Flaw checks
@@ -185,7 +215,7 @@ severe but they should never block a full build.
More information about how to set flaws can be found in `docs/envvars.md`.
Essentially, the default is to _warn_ about any flaw and you can see
-those flaws when using . But for completed builds,
+those flaws when using . For completed builds,
all flaws are ignored. This makes the build faster and there's also
no good place to display the flaws in a production-grade build.
@@ -196,7 +226,7 @@ be on you to fix it.
## Icons and logos
-The various formats and sizes of the favicon is generated
+The various formats and sizes of the favicon are generated
from the file `mdn-web-docs.svg` in the repository root. This file is then
converted to favicons using [realfavicongenerator.net](https://realfavicongenerator.net/).
To generate new favicons, edit or replace the `mdn-web-docs.svg` file
diff --git a/build/check-images.js b/build/check-images.js
index 11ba0727413c..129aa2f888d7 100644
--- a/build/check-images.js
+++ b/build/check-images.js
@@ -9,6 +9,7 @@ const sizeOf = require("image-size");
const { Document, Image } = require("../content");
const { FLAW_LEVELS } = require("./constants");
const { findMatchesInText } = require("./matches-in-text");
+const { DEFAULT_LOCALE } = require("../libs/constants");
/**
* Mutate the `$` instance for image reference and if appropriate,
@@ -86,7 +87,7 @@ function checkImageReferences(doc, $, options, { url, rawContent }) {
// a new function dedicated to that.
let finalSrc = null;
- if (!src.trim()) {
+ if (!src.split("#")[0].trim()) {
if (checkImages) {
addImageFlaw(img, src, {
explanation: "Empty img 'src' attribute",
@@ -119,9 +120,24 @@ function checkImageReferences(doc, $, options, { url, rawContent }) {
// it now, we still want the full relative URL.
img.attr("src", absoluteURL.pathname);
} else {
+ let suggestion = null;
+ // If this document is *not* en-US, perhaps the external image has already
+ // been downloaded by the en-US equivalent. If so, make that the suggestion.
+ if (doc.locale !== DEFAULT_LOCALE) {
+ const filePath = Image.findByURL(
+ path.join(
+ doc.mdn_url.replace(`/${doc.locale}/`, `/${DEFAULT_LOCALE}/`),
+ path.basename(src)
+ )
+ );
+ if (filePath) {
+ suggestion = path.basename(filePath);
+ }
+ }
addImageFlaw(img, src, {
explanation: "External image URL",
externalImage: true,
+ suggestion,
});
}
}
@@ -133,16 +149,38 @@ function checkImageReferences(doc, $, options, { url, rawContent }) {
// We can use the `finalSrc` to look up and find the image independent
// of the correct case because `Image.findByURL` operates case
// insensitively.
- const filePath = Image.findByURL(finalSrc);
+ let filePath = Image.findByURL(finalSrc);
+ let enUSFallback = false;
+ if (
+ !filePath &&
+ doc.locale !== DEFAULT_LOCALE &&
+ !finalSrc.startsWith(`/${DEFAULT_LOCALE.toLowerCase()}/`)
+ ) {
+ const enUSFinalSrc = finalSrc.replace(
+ `/${doc.locale.toLowerCase()}/`,
+ `/${DEFAULT_LOCALE.toLowerCase()}/`
+ );
+ if (Image.findByURL(enUSFinalSrc)) {
+ // Use the en-US src instead
+ finalSrc = enUSFinalSrc;
+ // Note that this `` value can work if you use the
+ // en-US equivalent URL instead.
+ enUSFallback = true;
+ }
+ }
if (filePath) {
filePaths.add(filePath);
}
if (checkImages) {
- if (!filePath) {
+ if (enUSFallback) {
+ // If it worked by switching to the en-US src, don't do anything more.
+ // Do nothing! I.e. don't try to perfect the spelling.
+ } else if (!filePath) {
// E.g. ` tag looks anything other than
// `` then we can't assume the `img[src]` can
// be resolved. For example, suppose the HTML contains ``
// then it's a broken image and it's handled by the `checkImageReferences()`
// function. Stay away from those.
- if (!imgSrc.includes("://") && imgSrc.startsWith("/")) {
+ if (!imgSrc) {
+ if (options.flawLevels.get("image_widths") === FLAW_LEVELS.ERROR) {
+ throw new Error(
+ `images width flaws: ${JSON.stringify(doc.flaws.image_widths)}`
+ );
+ }
+ } else if (!imgSrc.includes("://") && imgSrc.startsWith("/")) {
const filePath = Image.findByURL(imgSrc);
if (filePath) {
const dimensions = sizeOf(filePath);
diff --git a/build/cli.js b/build/cli.js
index 46c1f4dd5cd9..49b462d1da3c 100644
--- a/build/cli.js
+++ b/build/cli.js
@@ -6,30 +6,25 @@ const cliProgress = require("cli-progress");
const program = require("@caporal/core").default;
const { prompt } = require("inquirer");
-const { Document, slugToFolder } = require("../content");
+const {
+ Document,
+ slugToFolder,
+ translationsOf,
+ CONTENT_TRANSLATED_ROOT,
+} = require("../content");
+
// eslint-disable-next-line node/no-missing-require
-const { renderDocHTML, renderHTML } = require("../ssr/dist/main");
+const { renderDocHTML } = require("../ssr/dist/main");
const options = require("./build-options");
const { buildDocument, renderContributorsTxt } = require("./index");
const SearchIndex = require("./search-index");
-const {
- BUILD_OUT_ROOT,
- HOMEPAGE_FEED_URL,
- HOMEPAGE_FEED_DISPLAY_MAX,
-} = require("./constants");
+const { BUILD_OUT_ROOT } = require("./constants");
const { makeSitemapXML, makeSitemapIndexXML } = require("./sitemaps");
-const {
- CONTENT_TRANSLATED_ROOT,
- CONTENT_ROOT,
-} = require("../content/constants");
-const { uniqifyTranslationsOf } = require("./translationsof");
const { humanFileSize } = require("./utils");
-const { getFeedEntries } = require("./feedparser");
async function buildDocumentInteractive(
documentPath,
- translationsOf,
interactive,
invalidate = false
) {
@@ -38,37 +33,15 @@ async function buildDocumentInteractive(
? Document.read(documentPath, Document.MEMOIZE_INVALIDATE)
: Document.read(documentPath);
- const { translation_of } = document.metadata;
-
- // If it's a non-en-US document, it'll most likely have a `translation_of`.
- // If so, add it to the map so that when we build the en-US one, we can
- // get an index of the *other* translations available.
- if (translation_of) {
- if (!translationsOf.has(translation_of)) {
- translationsOf.set(translation_of, []);
- }
- const translation = {
- url: document.url,
- locale: document.metadata.locale,
- title: document.metadata.title,
- };
- if (document.metadata.translation_of_original) {
- translation.original = document.metadata.translation_of_original;
+ if (!interactive) {
+ const translations = translationsOf(document.metadata);
+ if (translations && translations.length > 0) {
+ document.translations = translations;
+ } else {
+ document.translations = [];
}
- translationsOf.get(translation_of).push(translation);
- // This is a shortcoming. If this is a translated document, we don't have a
- // complete mapping of all other translations. So, the best we can do is
- // at least link to the English version.
- // In 2021, when we refactor localization entirely, this will need to change.
- // Perhaps, then, we'll do a complete scan through all content first to build
- // up the map before we process each one.
- document.translations = [];
- } else if (translationsOf.has(document.metadata.slug)) {
- document.translations = uniqifyTranslationsOf(
- translationsOf.get(document.metadata.slug),
- document.url
- );
}
+
return { document, doc: await buildDocument(document), skip: false };
} catch (e) {
if (!interactive) {
@@ -89,12 +62,7 @@ async function buildDocumentInteractive(
},
]);
if (action === "r") {
- return await buildDocumentInteractive(
- documentPath,
- translationsOf,
- interactive,
- true
- );
+ return await buildDocumentInteractive(documentPath, interactive, true);
}
if (action === "s") {
return { doc: {}, skip: true };
@@ -142,9 +110,6 @@ async function buildDocuments(
}
}
- // This builds up a mapping from en-US slugs to their translated slugs.
- const translationsOf = new Map();
-
if (!options.noProgressbar) {
progressBar.start(documents.count);
}
@@ -154,11 +119,7 @@ async function buildDocuments(
doc: { doc: builtDocument, liveSamples, fileAttachments, bcdData },
document,
skip,
- } = await buildDocumentInteractive(
- documentPath,
- translationsOf,
- interactive
- );
+ } = await buildDocumentInteractive(documentPath, interactive);
if (skip) {
continue;
}
@@ -270,10 +231,8 @@ async function buildDocuments(
sitemapsBuilt.push(sitemapFilePath);
}
- // Only if you've just built all of CONTENT_ROOT and all of CONTENT_TRANSLATED_ROOT
// do we bother generating the combined sitemaps index file.
// That means, that if you've done this at least once, consequent runs of
- // *only* CONTENT_ROOT will just keep overwriting the sitemaps/en-us/sitemap.xml.gz.
if (CONTENT_TRANSLATED_ROOT) {
const sitemapIndexFilePath = path.join(BUILD_OUT_ROOT, "sitemap.xml");
fs.writeFileSync(
@@ -294,85 +253,6 @@ async function buildDocuments(
return { slugPerLocale: docPerLocale, peakHeapBytes, totalFlaws };
}
-async function buildOtherSPAs(options) {
- // The URL isn't very important as long as it triggers the right route in the
- const url = "/en-US/404.html";
- const html = renderHTML(url, { pageNotFound: true });
- const outPath = path.join(BUILD_OUT_ROOT, "en-us", "_spas");
- fs.mkdirSync(outPath, { recursive: true });
- fs.writeFileSync(path.join(outPath, path.basename(url)), html);
- if (!options.quiet) {
- console.log("Wrote", path.join(outPath, path.basename(url)));
- }
-
- // Basically, this builds one `search/index.html` for every locale we intend
- // to build.
- for (const root of [CONTENT_ROOT, CONTENT_TRANSLATED_ROOT]) {
- if (!root) {
- continue;
- }
- for (const locale of fs.readdirSync(root)) {
- if (!fs.statSync(path.join(root, locale)).isDirectory()) {
- continue;
- }
- const url = `/${locale}/search`;
- const html = renderHTML(url);
- const outPath = path.join(BUILD_OUT_ROOT, locale, "search");
- fs.mkdirSync(outPath, { recursive: true });
- const filePath = path.join(outPath, "index.html");
- fs.writeFileSync(filePath, html);
- if (!options.quiet) {
- console.log("Wrote", filePath);
- }
- }
- }
-
- // Build all the home pages in all locales.
- // Have the feed entries ready before building the home pages.
- // XXX disk caching?
- const feedEntries = (await getFeedEntries(HOMEPAGE_FEED_URL)).slice(
- 0,
- HOMEPAGE_FEED_DISPLAY_MAX
- );
- for (const root of [CONTENT_ROOT, CONTENT_TRANSLATED_ROOT]) {
- if (!root) {
- continue;
- }
- for (const locale of fs.readdirSync(root)) {
- if (!fs.statSync(path.join(root, locale)).isDirectory()) {
- continue;
- }
- const url = `/${locale}/`;
- // Each .pubDate in feedEntries is a Date object. That has to be converted
- // to a string. That way the SSR rendering is
- const dateFormatter = new Intl.DateTimeFormat(locale, {
- dateStyle: "full",
- });
- const context = {
- feedEntries: feedEntries.map((entry) => {
- const pubDateString = dateFormatter.format(entry.pubDate);
- return Object.assign({}, entry, { pubDate: pubDateString });
- }),
- };
- const html = renderHTML(url, context);
- const outPath = path.join(BUILD_OUT_ROOT, locale);
- fs.mkdirSync(outPath, { recursive: true });
- const filePath = path.join(outPath, "index.html");
- fs.writeFileSync(filePath, html);
- if (!options.quiet) {
- console.log("Wrote", filePath);
- }
- // Also, dump the feed entries as a JSON file so the data can be gotten
- // in client-side rendering.
- const filePathContext = path.join(outPath, "index.json");
- fs.writeFileSync(filePathContext, JSON.stringify(context));
- if (!options.quiet) {
- console.log("Wrote", filePathContext);
- }
- }
- }
-}
-
function formatTotalFlaws(flawsCountMap, header = "Total_Flaws_Count") {
if (!flawsCountMap.size) {
return "";
@@ -392,24 +272,12 @@ function formatTotalFlaws(flawsCountMap, header = "Total_Flaws_Count") {
program
.name("build")
- .option("--spas", "Build the SPA pages", { default: true }) // PR builds
- .option("--spas-only", "Only build the SPA pages", { default: false })
.option("-i, --interactive", "Ask what to do when encountering flaws", {
default: false,
})
.argument("[files...]", "specific files to build")
.action(async ({ args, options }) => {
try {
- if (options.spas) {
- if (!options.quiet) {
- console.log("\nBuilding SPAs...");
- }
- await buildOtherSPAs(options);
- }
- if (options.spasOnly) {
- return;
- }
-
if (!options.quiet) {
console.log("\nBuilding Documents...");
}
diff --git a/build/constants.js b/build/constants.js
index 6aa6e4cca10e..9a8e6d6bb941 100644
--- a/build/constants.js
+++ b/build/constants.js
@@ -30,6 +30,8 @@ const VALID_FLAW_CHECKS = new Set([
"image_widths",
"bad_pre_tags",
"sectioning",
+ "heading_links",
+ "unsafe_html",
]);
// TODO (far future): Switch to "error" when number of flaws drops.
diff --git a/build/feedparser.js b/build/feedparser.js
index c2b22976ce3d..0e25ca7119e2 100644
--- a/build/feedparser.js
+++ b/build/feedparser.js
@@ -14,9 +14,10 @@ async function getFeedEntries(url) {
for (const item of feed.rss.channel.item) {
const description = cheerio.load(item.description);
const summary = description("p").text();
+ const title = cheerio.load(item.title).text();
entries.push({
url: item.link,
- title: item.title,
+ title,
pubDate: new Date(item.pubDate),
creator: item["dc:creator"],
summary,
diff --git a/build/flaws.js b/build/flaws.js
index 5eb3c27b910a..3f916527c532 100644
--- a/build/flaws.js
+++ b/build/flaws.js
@@ -12,28 +12,162 @@ const imageminSvgo = require("imagemin-svgo");
const sanitizeFilename = require("sanitize-filename");
const { Archive, Document, Redirect, Image } = require("../content");
-const { FLAW_LEVELS } = require("./constants");
+const { FLAW_LEVELS, VALID_FLAW_CHECKS } = require("./constants");
+const {
+ INTERACTIVE_EXAMPLES_BASE_URL,
+ LIVE_SAMPLES_BASE_URL,
+} = require("../kumascript/src/constants");
const { packageBCD } = require("./resolve-bcd");
const {
findMatchesInText,
+ getFirstMatchInText,
replaceMatchesInText,
} = require("./matches-in-text");
const { humanFileSize } = require("./utils");
const { VALID_MIME_TYPES } = require("../filecheck/constants");
-function injectFlaws(doc, $, options, { rawContent }) {
+function injectFlaws(doc, $, options, document) {
if (doc.isArchive) return;
- injectBrokenLinksFlaws(
- options.flawLevels.get("broken_links"),
- doc,
- $,
- rawContent
- );
+ const flawChecks = [
+ ["unsafe_html", injectUnsafeHTMLFlaws, false],
+ ["broken_links", injectBrokenLinksFlaws, true],
+ ["bad_bcd_queries", injectBadBCDQueriesFlaws, false],
+ ["bad_pre_tags", injectPreTagFlaws, false],
+ ["heading_links", injectHeadingLinksFlaws, false],
+ ];
+
+ // Note that some flaw checking functions need to always run. Even if we're not
+ // recording the flaws, the checks that it does are important for regular
+ // building.
+
+ for (const [flawName, func, alwaysRun] of flawChecks) {
+ // Sanity check the list of flaw names that they're all recognized.
+ // Basically a cheap enum check.
+ if (!VALID_FLAW_CHECKS.has(flawName)) {
+ throw new Error(`'${flawName}' is not a valid flaw check name`);
+ }
- injectBadBCDQueriesFlaws(options.flawLevels.get("bad_bcd_queries"), doc, $);
+ const level = options.flawLevels.get(flawName);
+ if (!alwaysRun && level === FLAW_LEVELS.IGNORE) {
+ continue;
+ }
- injectPreTagFlaws(options.flawLevels.get("bad_pre_tags"), doc, $, rawContent);
+ // The flaw injection function will mutate the `doc.flaws` object.
+ func(doc, $, document, level);
+
+ if (
+ level === FLAW_LEVELS.ERROR &&
+ doc.flaws[flawName] &&
+ doc.flaws[flawName].length > 0
+ ) {
+ // To make the stdout output a bit more user-friendly, print one warning
+ // for each explanation
+ doc.flaws[flawName].forEach((flaw, i) => {
+ console.warn(
+ i + 1,
+ chalk.yellow(`${chalk.bold(flawName)} flaw: ${flaw.explanation}`)
+ );
+ });
+ throw new Error(`${doc.flaws[flawName].length} ${flawName} flaws`);
+ }
+ }
+}
+
+function injectUnsafeHTMLFlaws(doc, $, { rawContent }) {
+ function addFlaw(element, explanation) {
+ if (!("unsafe_html" in doc.flaws)) {
+ doc.flaws.unsafe_html = [];
+ }
+ const id = `unsafe_html${doc.flaws.unsafe_html.length + 1}`;
+ let html = $.html($(element));
+ $(element).replaceWith($("").addClass("unsafe-html").text(html));
+ // Some nasty tags are so broken they can make the HTML become more or less
+ // the whole page. E.g. ``
+ if (tagName.startsWith("script")) {
+ addFlaw(element, `possible
-