diff --git a/packages/gatsby-transformer-remark/src/extend-node-type.js b/packages/gatsby-transformer-remark/src/extend-node-type.js
index fd7ac66e8ecfd..221173c21790f 100644
--- a/packages/gatsby-transformer-remark/src/extend-node-type.js
+++ b/packages/gatsby-transformer-remark/src/extend-node-type.js
@@ -128,7 +128,7 @@ module.exports = (
         ASTPromiseMap.delete(cacheKey)
       }).catch(err => {
         ASTPromiseMap.delete(cacheKey)
-        throw err
+        return err
       })
       // Save new AST to cache and return
       // We can now release promise, as we cached result
@@ -137,99 +137,98 @@ module.exports = (
     }
   }
 
-  function getMarkdownAST(markdownNode) {
-    return new Promise(async (resolve, reject) => {
-      if (process.env.NODE_ENV !== `production` || !fileNodes) {
-        fileNodes = getNodesByType(`File`)
+  async function getMarkdownAST(markdownNode) {
+    if (process.env.NODE_ENV !== `production` || !fileNodes) {
+      fileNodes = getNodesByType(`File`)
+    }
+    // Use Bluebird's Promise function "each" to run remark plugins serially.
+    await Promise.each(pluginOptions.plugins, plugin => {
+      const requiredPlugin = require(plugin.resolve)
+      if (_.isFunction(requiredPlugin.mutateSource)) {
+        return requiredPlugin.mutateSource(
+          {
+            markdownNode,
+            files: fileNodes,
+            getNode,
+            reporter,
+            cache,
+          },
+          plugin.pluginOptions
+        )
+      } else {
+        return Promise.resolve()
       }
-      // Use Bluebird's Promise function "each" to run remark plugins serially.
-      await Promise.each(pluginOptions.plugins, plugin => {
-        const requiredPlugin = require(plugin.resolve)
-        if (_.isFunction(requiredPlugin.mutateSource)) {
-          return requiredPlugin.mutateSource(
-            {
-              markdownNode,
-              files: fileNodes,
-              getNode,
-              reporter,
-              cache,
-            },
-            plugin.pluginOptions
-          )
-        } else {
-          return Promise.resolve()
+    })
+    const markdownAST = remark.parse(markdownNode.internal.content)
+
+    if (pathPrefix) {
+      // Ensure relative links include `pathPrefix`
+      visit(markdownAST, [`link`, `definition`], node => {
+        if (
+          node.url &&
+          node.url.startsWith(`/`) &&
+          !node.url.startsWith(`//`)
+        ) {
+          node.url = withPathPrefix(node.url, pathPrefix)
         }
       })
-      const markdownAST = remark.parse(markdownNode.internal.content)
-
-      if (pathPrefix) {
-        // Ensure relative links include `pathPrefix`
-        visit(markdownAST, [`link`, `definition`], node => {
-          if (
-            node.url &&
-            node.url.startsWith(`/`) &&
-            !node.url.startsWith(`//`)
-          ) {
-            node.url = withPathPrefix(node.url, pathPrefix)
-          }
-        })
-      }
+    }
 
-      // source => parse (can order parsing for dependencies) => typegen
-      //
-      // source plugins identify nodes, provide id, initial parse, know
-      // when nodes are created/removed/deleted
-      // get passed cached DataTree and return list of clean and dirty nodes.
-      // Also get passed `dirtyNodes` function which they can call with an array
-      // of node ids which will then get re-parsed and the inferred schema
-      // recreated (if inferring schema gets too expensive, can also
-      // cache the schema until a query fails at which point recreate the
-      // schema).
-      //
-      // parse plugins take data from source nodes and extend it, never mutate
-      // it. Freeze all nodes once done so typegen plugins can't change it
-      // this lets us save off the DataTree at that point as well as create
-      // indexes.
-      //
-      // typegen plugins identify further types of data that should be lazily
-      // computed due to their expense, or are hard to infer graphql type
-      // (markdown ast), or are need user input in order to derive e.g.
-      // markdown headers or date fields.
-      //
-      // wrap all resolve functions to (a) auto-memoize and (b) cache to disk any
-      // resolve function that takes longer than ~10ms (do research on this
-      // e.g. how long reading/writing to cache takes), and (c) track which
-      // queries are based on which source nodes. Also if connection of what
-      // which are always rerun if their underlying nodes change..
-      //
-      // every node type in DataTree gets a schema type automatically.
-      // typegen plugins just modify the auto-generated types to add derived fields
-      // as well as computationally expensive fields.
-      if (process.env.NODE_ENV !== `production` || !fileNodes) {
-        fileNodes = getNodesByType(`File`)
+    // source => parse (can order parsing for dependencies) => typegen
+    //
+    // source plugins identify nodes, provide id, initial parse, know
+    // when nodes are created/removed/deleted
+    // get passed cached DataTree and return list of clean and dirty nodes.
+    // Also get passed `dirtyNodes` function which they can call with an array
+    // of node ids which will then get re-parsed and the inferred schema
+    // recreated (if inferring schema gets too expensive, can also
+    // cache the schema until a query fails at which point recreate the
+    // schema).
+    //
+    // parse plugins take data from source nodes and extend it, never mutate
+    // it. Freeze all nodes once done so typegen plugins can't change it
+    // this lets us save off the DataTree at that point as well as create
+    // indexes.
+    //
+    // typegen plugins identify further types of data that should be lazily
+    // computed due to their expense, or are hard to infer graphql type
+    // (markdown ast), or are need user input in order to derive e.g.
+    // markdown headers or date fields.
+    //
+    // wrap all resolve functions to (a) auto-memoize and (b) cache to disk any
+    // resolve function that takes longer than ~10ms (do research on this
+    // e.g. how long reading/writing to cache takes), and (c) track which
+    // queries are based on which source nodes. Also if connection of what
+    // which are always rerun if their underlying nodes change..
+    //
+    // every node type in DataTree gets a schema type automatically.
+    // typegen plugins just modify the auto-generated types to add derived fields
+    // as well as computationally expensive fields.
+    if (process.env.NODE_ENV !== `production` || !fileNodes) {
+      fileNodes = getNodesByType(`File`)
+    }
+    // Use Bluebird's Promise function "each" to run remark plugins serially.
+    await Promise.each(pluginOptions.plugins, plugin => {
+      const requiredPlugin = require(plugin.resolve)
+      if (_.isFunction(requiredPlugin)) {
+        return requiredPlugin(
+          {
+            markdownAST,
+            markdownNode,
+            getNode,
+            files: fileNodes,
+            pathPrefix,
+            reporter,
+            cache,
+          },
+          plugin.pluginOptions
+        )
+      } else {
+        return Promise.resolve()
       }
-      // Use Bluebird's Promise function "each" to run remark plugins serially.
-      await Promise.each(pluginOptions.plugins, plugin => {
-        const requiredPlugin = require(plugin.resolve)
-        if (_.isFunction(requiredPlugin)) {
-          return requiredPlugin(
-            {
-              markdownAST,
-              markdownNode,
-              getNode,
-              files: fileNodes,
-              pathPrefix,
-              reporter,
-              cache,
-            },
-            plugin.pluginOptions
-          )
-        } else {
-          return Promise.resolve()
-        }
-      })
-      resolve(markdownAST)
     })
+
+    return markdownAST
   }
 
   async function getHeadings(markdownNode) {
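The hunk above replaces the `new Promise(async (resolve, reject) => { ... })` wrapper with a plain `async` function while keeping Bluebird's `Promise.each` to run the remark plugin hooks one at a time. A minimal standalone sketch of that pattern follows; the `plugins` array and `processNode` hook are hypothetical illustrations, not the gatsby-transformer-remark API.

const Promise = require(`bluebird`)

// Hypothetical plugin hooks; each may return a plain value or a promise.
const plugins = [
  { processNode: async node => `${node} (first)` },
  { processNode: node => `${node} (second)` },
]

// A plain async function instead of `new Promise(async (resolve, reject) => ...)`:
// awaited work runs in sequence and the result is simply returned.
async function transformNode(node) {
  let result = node
  // Bluebird's Promise.each waits for the promise returned for one item
  // before calling the iterator for the next, so the hooks run serially.
  await Promise.each(plugins, plugin =>
    Promise.resolve(plugin.processNode(result)).then(value => {
      result = value
    })
  )
  return result
}

transformNode(`hello`).then(console.log) // => "hello (first) (second)"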