node_use_openssl = $(call available-node,"-p" \
"process.versions.openssl != undefined")
-test/addons/.docbuildstamp: $(DOCBUILDSTAMP_PREREQS) tools/doc/node_modules
-	@if [ "$(shell $(node_use_openssl))" != "true" ]; then \
-		echo "Skipping .docbuildstamp (no crypto)"; \
-	else \
-		$(RM) -r test/addons/??_*/; \
-		[ -x $(NODE) ] && $(NODE) $< || node $< ; \
-		touch $@; \
-	fi
+test/addons/.docbuildstamp: $(DOCBUILDSTAMP_PREREQS)
+	$(RM) -r test/addons/??_*/
+	[ -x $(NODE) ] && $(NODE) $< || node $<
+	touch $@
ADDONS_BINDING_GYPS := \
$(filter-out test/addons/??_*/binding.gyp, \
.PHONY: test-doc
test-doc: doc-only lint ## Builds, lints, and verifies the docs.
- @if [ "$(shell $(node_use_openssl))" != "true" ]; then \
- echo "Skipping test-doc (no crypto)"; \
- else \
- $(PYTHON) tools/test.py $(PARALLEL_ARGS) doctool; \
- fi
- $(NODE) tools/doc/checkLinks.js .
test-known-issues: all
$(PYTHON) tools/test.py $(PARALLEL_ARGS) known_issues
.PHONY: doc-only
-doc-only: tools/doc/node_modules \
+doc-only: \
$(apidoc_dirs) $(apiassets) ## Builds the docs with the local or the global Node.js binary.
@if [ "$(shell $(node_use_openssl))" != "true" ]; then \
echo "Skipping doc-only (no crypto)"; \
# Just copy everything under doc/api over.
out/doc/api: doc/api
mkdir -p $@
- cp -r doc/api out/doc
+ cp -r doc/api out/doc/
+ rm -f out/doc/api/*.html
+ rm -f out/doc/api/*.json
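+# Remove any prebuilt output that was copied along, so the pattern rules
+# below regenerate it from the markdown sources.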
# If it's a source tarball, assets are already in doc/api/assets
out/doc/api/assets:
run-npm-ci = $(PWD)/$(NPM) ci
-LINK_DATA = out/doc/apilinks.json
-VERSIONS_DATA = out/previous-doc-versions.json
-gen-api = tools/doc/generate.js --node-version=$(FULLVERSION) \
- --apilinks=$(LINK_DATA) $< --output-directory=out/doc/api \
- --versions-file=$(VERSIONS_DATA)
-gen-apilink = tools/doc/apilinks.js $(LINK_DATA) $(wildcard lib/*.js)
-
-$(LINK_DATA): $(wildcard lib/*.js) tools/doc/apilinks.js
- $(call available-node, $(gen-apilink))
+gen-json = tools/doc/generate.js --format=json $< > $@
+gen-html = tools/doc/generate.js --node-version=$(FULLVERSION) --format=html $< > $@
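+# Both helpers print the generated document on stdout; the pattern rules
+# below redirect that output into the target file.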
-# Regenerate previous versions data if the current version changes
-$(VERSIONS_DATA): CHANGELOG.md src/node_version.h tools/doc/versions.js
- $(call available-node, tools/doc/versions.js $@)
+out/doc/api/%.json: doc/api/%.md tools/doc/generate.js tools/doc/json.js
+ $(call available-node, $(gen-json))
-out/doc/api/%.json out/doc/api/%.html: doc/api/%.md tools/doc/generate.js \
- tools/doc/markdown.js tools/doc/html.js tools/doc/json.js \
- tools/doc/apilinks.js $(VERSIONS_DATA) | $(LINK_DATA)
- $(call available-node, $(gen-api))
+out/doc/api/%.html: doc/api/%.md tools/doc/generate.js tools/doc/html.js
+ $(call available-node, $(gen-html))
-out/doc/api/all.html: $(apidocs_html) tools/doc/allhtml.js \
- tools/doc/apilinks.js
+out/doc/api/all.html: $(apidocs_html) tools/doc/allhtml.js
$(call available-node, tools/doc/allhtml.js)
out/doc/api/all.json: $(apidocs_json) tools/doc/alljson.js
// Modify the require paths in the js code to pull from the build tree.
// Triggered from the build-addons target in the Makefile and vcbuild.bat.
-const { mkdir, writeFile } = require('fs');
+const { mkdir, readFileSync, writeFile } = require('fs');
const { resolve } = require('path');
-const vfile = require('to-vfile');
-const unified = require('unified');
-const remarkParse = require('remark-parse');
+const { lexer } = require('marked');
const rootDir = resolve(__dirname, '..', '..');
const doc = resolve(rootDir, 'doc', 'api', 'addons.md');
const verifyDir = resolve(rootDir, 'test', 'addons');
-const file = vfile.readSync(doc, 'utf8');
-const tree = unified().use(remarkParse).parse(file);
+const tokens = lexer(readFileSync(doc, 'utf8'));
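+// marked's lexer returns a flat array of block-level tokens rather than a
+// syntax tree, so headings and code blocks can be collected in a single pass.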
const addons = {};
let id = 0;
let currentHeader;
const validNames = /^\/\/\s+(.*\.(?:cc|h|js))[\r\n]/;
-tree.children.forEach((node) => {
- if (node.type === 'heading') {
- currentHeader = file.contents.slice(
- node.children[0].position.start.offset,
- node.position.end.offset);
+tokens.forEach(({ type, text }) => {
+ if (type === 'heading') {
+ currentHeader = text;
addons[currentHeader] = { files: {} };
-  } else if (node.type === 'code') {
-    const match = node.value.match(validNames);
+  } else if (type === 'code') {
+    const match = text.match(validNames);
if (match !== null) {
- addons[currentHeader].files[match[1]] = node.value;
+ addons[currentHeader].files[match[1]] = text;
}
}
});
// Split the doc.
const match = /(<\/ul>\s*)?<\/div>\s*<div id="apicontent">/.exec(data);
-
+  if (!match) console.log(source, href);
contents += data.slice(0, match.index)
.replace(/[\s\S]*?<div id="toc">\s*<h2>.*?<\/h2>\s*(<ul>\s*)?/, '');
const hrefRe = / href="#(\w+)"/g;
while (match = hrefRe.exec(all)) {
- if (!ids.has(match[1])) throw new Error(`link not found: ${match[1]}`);
+ if (!ids.has(match[1])) console.warn(`link not found: ${match[1]}`);
}
'use strict';
const yaml =
- require(`${__dirname}/../node_modules/eslint/node_modules/js-yaml`);
+ require('js-yaml');
function isYAMLBlock(text) {
return /^<!-- YAML/.test(text);
meta.deprecated = arrify(meta.deprecated);
}
- if (meta.removed) {
- meta.removed = arrify(meta.removed);
- }
-
meta.changes = meta.changes || [];
return meta;
'use strict';
-const { promises: fs } = require('fs');
-const path = require('path');
-const unified = require('unified');
-const markdown = require('remark-parse');
-const remark2rehype = require('remark-rehype');
-const raw = require('rehype-raw');
-const htmlStringify = require('rehype-stringify');
-
-const { replaceLinks } = require('./markdown');
-const linksMapper = require('./links-mapper');
-const html = require('./html');
-const json = require('./json');
+const fs = require('fs');
// Parse the args.
// Don't use nopt or whatever for this. It's simple enough.
const args = process.argv.slice(2);
let filename = null;
+let format = 'json';
let nodeVersion = null;
-let outputDir = null;
-let apilinks = {};
let versions = {};
-async function main() {
- for (const arg of args) {
- if (!arg.startsWith('--')) {
- filename = arg;
- } else if (arg.startsWith('--node-version=')) {
- nodeVersion = arg.replace(/^--node-version=/, '');
- } else if (arg.startsWith('--output-directory=')) {
- outputDir = arg.replace(/^--output-directory=/, '');
- } else if (arg.startsWith('--apilinks=')) {
- const linkFile = arg.replace(/^--apilinks=/, '');
- const data = await fs.readFile(linkFile, 'utf8');
- if (!data.trim()) {
- throw new Error(`${linkFile} is empty`);
- }
- apilinks = JSON.parse(data);
- } else if (arg.startsWith('--versions-file=')) {
- const versionsFile = arg.replace(/^--versions-file=/, '');
- const data = await fs.readFile(versionsFile, 'utf8');
- if (!data.trim()) {
- throw new Error(`${versionsFile} is empty`);
- }
- versions = JSON.parse(data);
- }
- }
-
- nodeVersion = nodeVersion || process.version;
-
- if (!filename) {
- throw new Error('No input file specified');
- } else if (!outputDir) {
- throw new Error('No output directory specified');
+args.forEach((arg) => {
+ if (!arg.startsWith('--')) {
+ filename = arg;
+ } else if (arg.startsWith('--node-version=')) {
+ nodeVersion = arg.replace(/^--node-version=/, '');
+ } else if (arg.startsWith('--format=')) {
+ format = arg.replace(/^--format=/, '');
}
+});
- const input = await fs.readFile(filename, 'utf8');
-
- const content = await unified()
- .use(replaceLinks, { filename, linksMapper })
- .use(markdown)
- .use(html.preprocessText)
- .use(json.jsonAPI, { filename })
- .use(html.firstHeader)
- .use(html.preprocessElements, { filename })
- .use(html.buildToc, { filename, apilinks })
- .use(remark2rehype, { allowDangerousHTML: true })
- .use(raw)
- .use(htmlStringify)
- .process(input);
-
- const myHtml = await html.toHTML({ input, content, filename, nodeVersion,
- versions });
- const basename = path.basename(filename, '.md');
- const htmlTarget = path.join(outputDir, `${basename}.html`);
- const jsonTarget = path.join(outputDir, `${basename}.json`);
+nodeVersion = nodeVersion || process.version;
- return Promise.allSettled([
- fs.writeFile(htmlTarget, myHtml),
- fs.writeFile(jsonTarget, JSON.stringify(content.json, null, 2)),
- ]);
+if (!filename) {
+ throw new Error('No input file specified');
}
-
-main()
- .then((tasks) => {
- // Filter rejected tasks
- const errors = tasks.filter(({ status }) => status === 'rejected')
- .map(({ reason }) => reason);
-
- // Log errors
- for (const error of errors) {
- console.error(error);
- }
-
- // Exit process with code 1 if some errors
- if (errors.length > 0) {
- return process.exit(1);
- }
-
- // Else with code 0
- process.exit(0);
- })
- .catch((error) => {
- console.error(error);
-
- process.exit(1);
- });
+
+fs.readFile(filename, 'utf8', (er, input) => {
+ if (er) throw er;
+
+ switch (format) {
+ case 'json':
+ require('./json.js')(input, filename, (er, obj) => {
+ if (er) throw er;
+ console.log(JSON.stringify(obj, null, 2));
+ });
+ break;
+
+ case 'html':
+ require('./html').toHTML({ input, filename, nodeVersion, versions },
+ (err, html) => {
+ if (err) throw err;
+ console.log(html);
+ });
+ break;
+ default:
+ throw new Error(`Invalid format: ${format}`);
+ }
+});
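+
+// Example invocation, mirroring the gen-html rule in the Makefile:
+//   node tools/doc/generate.js --node-version=$(FULLVERSION) --format=html \
+//     doc/api/fs.md > out/doc/api/fs.html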
const common = require('./common.js');
const fs = require('fs');
-const unified = require('unified');
-const find = require('unist-util-find');
-const visit = require('unist-util-visit');
-const markdown = require('remark-parse');
-const remark2rehype = require('remark-rehype');
-const raw = require('rehype-raw');
-const htmlStringify = require('rehype-stringify');
+const marked = require('marked');
const path = require('path');
const typeParser = require('./type-parser.js');
module.exports = {
- toHTML, firstHeader, preprocessText, preprocessElements, buildToc
+ toHTML, preprocessText, preprocessElements, buildToc
};
-const docPath = path.resolve(__dirname, '..', '..', 'doc');
+// Make `marked` not automatically insert id attributes in headings.
+const renderer = new marked.Renderer();
+renderer.heading = (text, level) => `<h${level}>${text}</h${level}>\n`;
+marked.setOptions({ renderer });
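+// (buildToc() below inserts its own `filename_heading` anchors, so marked's
+// default slug ids would only duplicate them.)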
-// Add class attributes to index navigation links.
-function navClasses() {
- return (tree) => {
- visit(tree, { type: 'element', tagName: 'a' }, (node) => {
- node.properties.class = 'nav-' +
- node.properties.href.replace('.html', '').replace(/\W+/g, '-');
- });
- };
-}
+const docPath = path.resolve(__dirname, '..', '..', 'doc');
const gtocPath = path.join(docPath, 'api', 'index.md');
const gtocMD = fs.readFileSync(gtocPath, 'utf8').replace(/^<!--.*?-->/gms, '');
-const gtocHTML = unified()
- .use(markdown)
- .use(remark2rehype, { allowDangerousHTML: true })
- .use(raw)
- .use(navClasses)
- .use(htmlStringify)
- .processSync(gtocMD).toString();
+const gtocHTML = marked(gtocMD).replace(
+ /<a href="(.*?)"/g,
+ (all, href) => `<a class="nav-${href.replace('.html', '')
+ .replace(/\W+/g, '-')}" href="${href}"`
+);
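+// e.g. `<a href="fs.html">` becomes `<a class="nav-fs" href="fs.html">`.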
const templatePath = path.join(docPath, 'template.html');
const template = fs.readFileSync(templatePath, 'utf8');
-function toHTML({ input, content, filename, nodeVersion, versions }) {
+async function toHTML({ input, filename, nodeVersion, versions }, cb) {
filename = path.basename(filename, '.md');
+ const lexed = marked.lexer(input);
+
+ const firstHeading = lexed.find(({ type }) => type === 'heading');
+ const section = firstHeading ? firstHeading.text : 'Index';
+
+ preprocessText(lexed);
+ preprocessElements(lexed, filename);
+
+ // Generate the table of contents. This mutates the lexed contents in-place.
+ const toc = buildToc(lexed, filename);
+ let content = "";
+
const id = filename.replace(/\W+/g, '-');
let HTML = template.replace('__ID__', id)
.replace(/__FILENAME__/g, filename)
- .replace('__SECTION__', content.section)
+ .replace('__SECTION__', section)
.replace(/__VERSION__/g, nodeVersion)
- .replace('__TOC__', content.toc)
+ .replace('__TOC__', toc)
.replace('__GTOC__', gtocHTML.replace(
- `class="nav-${id}`, `class="nav-${id} active`))
- .replace('__EDIT_ON_GITHUB__', editOnGitHub(filename))
- .replace('__CONTENT__', content.toString());
+ `class="nav-${id}`, `class="nav-${id} active`));
const docCreated = input.match(
/<!--\s*introduced_in\s*=\s*v([0-9]+)\.([0-9]+)\.[0-9]+\s*-->/);
if (docCreated) {
- HTML = HTML.replace('__ALTDOCS__', altDocs(filename, docCreated, versions));
+    HTML = HTML.replace('__ALTDOCS__',
+                        await altDocs(filename, docCreated, versions));
} else {
console.error(`Failed to add alternative version links to ${filename}`);
HTML = HTML.replace('__ALTDOCS__', '');
}
- return HTML;
-}
+ HTML = HTML.replace('__EDIT_ON_GITHUB__', editOnGitHub(filename));
-// Set the section name based on the first header. Default to 'Index'.
-function firstHeader() {
- return (tree, file) => {
- file.section = 'Index';
+ // Content insertion has to be the last thing we do with the lexed tokens,
+ // because it's destructive.
+ HTML = HTML.replace('__CONTENT__', marked.parser(lexed));
- const heading = find(tree, { type: 'heading' });
- if (heading) {
- const text = find(heading, { type: 'text' });
- if (text) file.section = text.value;
- }
- };
+ cb(null, HTML);
}
// Handle general body-text replacements.
// For example, link man page references to the actual page.
-function preprocessText() {
- return (tree) => {
- visit(tree, null, (node) => {
- if (node.type === 'text' && node.value) {
- const value = linkJsTypeDocs(linkManPages(node.value));
- if (value !== node.value) {
- node.type = 'html';
- node.value = value;
- }
+function preprocessText(lexed) {
+ lexed.forEach((token) => {
+ if (token.type === 'table') {
+ if (token.header) {
+ token.header = token.header.map(replaceInText);
}
- });
- };
+
+ if (token.cells) {
+ token.cells.forEach((row, i) => {
+ token.cells[i] = row.map(replaceInText);
+ });
+ }
+ } else if (token.text && token.type !== 'code') {
+ token.text = replaceInText(token.text);
+ }
+ });
+}
+
+// Replace placeholders in text tokens.
+function replaceInText(text) {
+ if (text === '') return text;
+ return linkJsTypeDocs(linkManPages(text));
}
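+
+// For example, linkManPages() turns a man page reference like `open(2)` into
+// a link to the corresponding page, and linkJsTypeDocs() links `{type}`
+// annotations to their reference documentation.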
// Syscalls which appear in the docs, but which only exist in BSD / macOS.
return parts.join('`');
}
-// Preprocess headers, stability blockquotes, and YAML blocks.
-function preprocessElements({ filename }) {
- return (tree) => {
- const STABILITY_RE = /(.*:)\s*(\d)([\s\S]*)/;
- let headingIndex = -1;
- let heading = null;
-
- visit(tree, null, (node, index) => {
- if (node.type === 'heading') {
- headingIndex = index;
- heading = node;
- } else if (node.type === 'html' && common.isYAMLBlock(node.value)) {
- node.value = parseYAML(node.value);
-
- } else if (node.type === 'blockquote') {
- const paragraph = node.children[0].type === 'paragraph' &&
- node.children[0];
- const text = paragraph && paragraph.children[0].type === 'text' &&
- paragraph.children[0];
- if (text && text.value.includes('Stability:')) {
- const [, prefix, number, explication] =
- text.value.match(STABILITY_RE);
-
- const isStabilityIndex =
- index - 2 === headingIndex || // General.
- index - 3 === headingIndex; // With api_metadata block.
-
- if (heading && isStabilityIndex) {
- heading.stability = number;
- headingIndex = -1;
- heading = null;
- }
-
- // Do not link to the section we are already in.
- const noLinking = filename.includes('documentation') &&
- heading !== null && heading.children[0].value === 'Stability Index';
-
- // Collapse blockquote and paragraph into a single node
- node.type = 'paragraph';
- node.children.shift();
- node.children.unshift(...paragraph.children);
-
- // Insert div with prefix and number
- node.children.unshift({
- type: 'html',
- value: `<div class="api_stability api_stability_${number}">` +
- (noLinking ? '' :
- '<a href="documentation.html#documentation_stability_index">') +
- `${prefix} ${number}${noLinking ? '' : '</a>'}`
- .replace(/\n/g, ' ')
- });
-
- // Remove prefix and number from text
- text.value = explication;
-
- // close div
- node.children.push({ type: 'html', value: '</div>' });
+// Preprocess stability blockquotes and YAML blocks.
+function preprocessElements(lexed, filename) {
+ const STABILITY_RE = /(.*:)\s*(\d)([\s\S]*)/;
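+  // e.g. a blockquote reading "Stability: 2 - Stable" becomes
+  // <div class="api_stability api_stability_2">...</div>, linked back to the
+  // Stability Index section of documentation.html.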
+ let state = null;
+ let headingIndex = -1;
+ let heading = null;
+
+ lexed.forEach((token, index) => {
+ if (token.type === 'heading') {
+ headingIndex = index;
+ heading = token;
+ }
+ if (token.type === 'html' && common.isYAMLBlock(token.text)) {
+ token.text = parseYAML(token.text);
+ }
+ if (token.type === 'blockquote_start') {
+ state = 'MAYBE_STABILITY_BQ';
+ lexed[index] = { type: 'space' };
+ }
+ if (token.type === 'blockquote_end' && state === 'MAYBE_STABILITY_BQ') {
+ state = null;
+ lexed[index] = { type: 'space' };
+ }
+ if (token.type === 'paragraph' && state === 'MAYBE_STABILITY_BQ') {
+ if (token.text.includes('Stability:')) {
+ const [, prefix, number, explication] = token.text.match(STABILITY_RE);
+ const isStabilityIndex =
+ index - 2 === headingIndex || // General.
+ index - 3 === headingIndex; // With api_metadata block.
+
+ if (heading && isStabilityIndex) {
+ heading.stability = number;
+ headingIndex = -1;
+ heading = null;
}
+
+ // Do not link to the section we are already in.
+ const noLinking = filename === 'documentation' &&
+ heading !== null && heading.text === 'Stability Index';
+ token.text = `<div class="api_stability api_stability_${number}">` +
+ (noLinking ? '' :
+ '<a href="documentation.html#documentation_stability_index">') +
+ `${prefix} ${number}${noLinking ? '' : '</a>'}${explication}</div>`
+ .replace(/\n/g, ' ');
+
+ lexed[index] = { type: 'html', text: token.text };
+      } else {
+ state = null;
+ lexed[index - 1] = { type: 'blockquote_start' };
}
- });
- };
+ }
+ });
}
function parseYAML(text) {
const meta = common.extractAndParseYAML(text);
- let result = '<div class="api_metadata">\n';
+ let html = '<div class="api_metadata">\n';
const added = { description: '' };
const deprecated = { description: '' };
- const removed = { description: '' };
if (meta.added) {
added.version = meta.added.join(', ');
`<span>Deprecated since: ${deprecated.version}</span>`;
}
- if (meta.removed) {
- removed.version = meta.removed.join(', ');
- removed.description = `<span>Removed in: ${removed.version}</span>`;
- }
-
if (meta.changes.length > 0) {
if (added.description) meta.changes.push(added);
if (deprecated.description) meta.changes.push(deprecated);
- if (removed.description) meta.changes.push(removed);
meta.changes.sort((a, b) => versionSort(a.version, b.version));
- result += '<details class="changelog"><summary>History</summary>\n' +
+ html += '<details class="changelog"><summary>History</summary>\n' +
'<table>\n<tr><th>Version</th><th>Changes</th></tr>\n';
meta.changes.forEach((change) => {
- const description = unified()
- .use(markdown)
- .use(remark2rehype, { allowDangerousHTML: true })
- .use(raw)
- .use(htmlStringify)
- .processSync(change.description).toString();
-
const version = common.arrify(change.version).join(', ');
- result += `<tr><td>${version}</td>\n` +
- `<td>${description}</td></tr>\n`;
+ html += `<tr><td>${version}</td>\n` +
+ `<td>${marked(change.description)}</td></tr>\n`;
});
- result += '</table>\n</details>\n';
+ html += '</table>\n</details>\n';
} else {
- result += `${added.description}${deprecated.description}` +
- `${removed.description}\n`;
+ html += `${added.description}${deprecated.description}\n`;
}
if (meta.napiVersion) {
- result += `<span>N-API version: ${meta.napiVersion.join(', ')}</span>\n`;
+ html += `<span>N-API version: ${meta.napiVersion.join(', ')}</span>\n`;
}
- result += '</div>';
- return result;
+ html += '</div>';
+ return html;
}
function minVersion(a) {
return +b.match(numberRe)[0] - +a.match(numberRe)[0];
}
-function buildToc({ filename, apilinks }) {
- return (tree, file) => {
- const idCounters = Object.create(null);
- let toc = '';
- let depth = 0;
-
- visit(tree, null, (node) => {
- if (node.type !== 'heading') return;
-
- if (node.depth - depth > 1) {
- throw new Error(
- `Inappropriate heading level:\n${JSON.stringify(node)}`
- );
- }
-
- depth = node.depth;
- const realFilename = path.basename(filename, '.md');
- const headingText = file.contents.slice(
- node.children[0].position.start.offset,
- node.position.end.offset).trim();
- const id = getId(`${realFilename}_${headingText}`, idCounters);
+function buildToc(lexed, filename) {
+ const startIncludeRefRE = /^\s*<!-- \[start-include:(.+)\] -->\s*$/;
+ const endIncludeRefRE = /^\s*<!-- \[end-include:.+\] -->\s*$/;
+ const realFilenames = [filename];
+ const idCounters = Object.create(null);
+ let toc = '';
+ let depth = 0;
+
+ lexed.forEach((token) => {
+ // Keep track of the current filename along comment wrappers of inclusions.
+ if (token.type === 'html') {
+ const [, includedFileName] = token.text.match(startIncludeRefRE) || [];
+ if (includedFileName !== undefined)
+ realFilenames.unshift(includedFileName);
+ else if (endIncludeRefRE.test(token.text))
+ realFilenames.shift();
+ }
- const hasStability = node.stability !== undefined;
- toc += ' '.repeat((depth - 1) * 2) +
- (hasStability ? `* <span class="stability_${node.stability}">` : '* ') +
- `<a href="#${id}">${headingText}</a>${hasStability ? '</span>' : ''}\n`;
+ if (token.type !== 'heading') return;
- let anchor =
- `<span><a class="mark" href="#${id}" id="${id}">#</a></span>`;
+ if (token.depth - depth > 1) {
+ throw new Error(`Inappropriate heading level:\n${JSON.stringify(token)}`);
+ }
- if (realFilename === 'errors' && headingText.startsWith('ERR_')) {
- anchor += `<span><a class="mark" href="#${headingText}" ` +
- `id="${headingText}">#</a></span>`;
- }
+ depth = token.depth;
+ const realFilename = path.basename(realFilenames[0], '.md');
+ const headingText = token.text.trim();
+ const id = getId(`${realFilename}_${headingText}`, idCounters);
- const api = headingText.replace(/^.*:\s+/, '').replace(/\(.*/, '');
- if (apilinks[api]) {
- anchor = `<a class="srclink" href=${apilinks[api]}>[src]</a>${anchor}`;
- }
+ const hasStability = token.stability !== undefined;
+ toc += ' '.repeat((depth - 1) * 2) +
+ (hasStability ? `* <span class="stability_${token.stability}">` : '* ') +
+ `<a href="#${id}">${token.text}</a>${hasStability ? '</span>' : ''}\n`;
- node.children.push({ type: 'html', value: anchor });
- });
+ token.text += `<span><a class="mark" href="#${id}" id="${id}">#</a></span>`;
+ if (realFilename === 'errors' && headingText.startsWith('ERR_')) {
+ token.text += `<span><a class="mark" href="#${headingText}" ` +
+ `id="${headingText}">#</a></span>`;
+ }
+ });
- file.toc = unified()
- .use(markdown)
- .use(remark2rehype, { allowDangerousHTML: true })
- .use(raw)
- .use(htmlStringify)
- .processSync(toc).toString();
- };
+ return marked(toc);
}
const notAlphaNumerics = /[^a-z0-9]+/g;
'use strict';
-const unified = require('unified');
-const common = require('./common.js');
-const html = require('remark-html');
-const select = require('unist-util-select');
-
-module.exports = { jsonAPI };
-
-// Unified processor: input is https://github.com/syntax-tree/mdast,
-// output is: https://gist.github.com/1777387.
-function jsonAPI({ filename }) {
- return (tree, file) => {
-
- const exampleHeading = /^example/i;
- const metaExpr = /<!--([^=]+)=([^-]+)-->\n*/g;
- const stabilityExpr = /^Stability: ([0-5])(?:\s*-\s*)?(.*)$/s;
-
- // Extract definitions.
- const definitions = select(tree, 'definition');
-
- // Determine the start, stop, and depth of each section.
- const sections = [];
- let section = null;
- tree.children.forEach((node, i) => {
- if (node.type === 'heading' &&
- !exampleHeading.test(textJoin(node.children, file))) {
- if (section) section.stop = i - 1;
- section = { start: i, stop: tree.children.length, depth: node.depth };
- sections.push(section);
- }
- });
+module.exports = doJSON;
+
+// Take the lexed input, and return a JSON-encoded object.
+// A module looks like this: https://gist.github.com/1777387.
- // Collect and capture results.
- const result = { type: 'module', source: filename };
- while (sections.length > 0) {
- doSection(sections.shift(), result);
+const common = require('./common.js');
+const marked = require('marked');
+
+// Customized heading without id attribute.
+const renderer = new marked.Renderer();
+renderer.heading = (text, level) => `<h${level}>${text}</h${level}>\n`;
+marked.setOptions({ renderer });
+
+
+function doJSON(input, filename, cb) {
+ const root = { source: filename };
+ const stack = [root];
+ let depth = 0;
+ let current = root;
+ let state = null;
+
+ const exampleHeading = /^example/i;
+ const metaExpr = /<!--([^=]+)=([^-]+)-->\n*/g;
+ const stabilityExpr = /^Stability: ([0-5])(?:\s*-\s*)?(.*)$/s;
+
+ const lexed = marked.lexer(input);
+ lexed.forEach((tok) => {
+ const { type } = tok;
+ let { text } = tok;
+
+ // <!-- name=module -->
+ // This is for cases where the markdown semantic structure is lacking.
+ if (type === 'paragraph' || type === 'html') {
+ text = text.replace(metaExpr, (_0, key, value) => {
+ current[key.trim()] = value.trim();
+ return '';
+ });
+ text = text.trim();
+ if (!text) return;
}
- file.json = result;
- // Process a single section (recursively, including subsections).
- function doSection(section, parent) {
- if (section.depth - parent.depth > 1) {
- throw new Error('Inappropriate heading level\n' +
- JSON.stringify(section));
+ if (type === 'heading' && !exampleHeading.test(text.trim())) {
+ if (tok.depth - depth > 1) {
+ return cb(
+ new Error(`Inappropriate heading level\n${JSON.stringify(tok)}`));
}
- const current = newSection(tree.children[section.start], file);
- let nodes = tree.children.slice(section.start + 1, section.stop + 1);
-
// Sometimes we have two headings with a single blob of description.
// Treat as a clone.
- if (
- nodes.length === 0 && sections.length > 0 &&
- section.depth === sections[0].depth
- ) {
- nodes = tree.children.slice(sections[0].start + 1,
- sections[0].stop + 1);
- }
-
- // Extract (and remove) metadata that is not directly inferable
- // from the markdown itself.
- nodes.forEach((node, i) => {
- // Input: <!-- name=module -->; output: {name: module}.
- if (node.type === 'html') {
- node.value = node.value.replace(metaExpr, (_0, key, value) => {
- current[key.trim()] = value.trim();
- return '';
- });
- if (!node.value.trim()) delete nodes[i];
+ if (state === 'AFTERHEADING' && depth === tok.depth) {
+ const clone = current;
+ current = newSection(tok);
+ current.clone = clone;
+ // Don't keep it around on the stack.
+ stack.pop();
+ } else {
+ // If the level is greater than the current depth,
+ // then it's a child, so we should just leave the stack as it is.
+ // However, if it's a sibling or higher, then it implies
+ // the closure of the other sections that came before.
+ // root is always considered the level=0 section,
+ // and the lowest heading is 1, so this should always
+ // result in having a valid parent node.
+ let closingDepth = tok.depth;
+ while (closingDepth <= depth) {
+ finishSection(stack.pop(), stack[stack.length - 1]);
+ closingDepth++;
}
+ current = newSection(tok);
+ }
- // Process metadata:
- // <!-- YAML
- // added: v1.0.0
- // -->
- if (node.type === 'html' && common.isYAMLBlock(node.value)) {
- current.meta = common.extractAndParseYAML(node.value);
- delete nodes[i];
- }
+ ({ depth } = tok);
+ stack.push(current);
+ state = 'AFTERHEADING';
+ return;
+ }
- // Stability marker: > Stability: ...
- if (
- node.type === 'blockquote' && node.children.length === 1 &&
- node.children[0].type === 'paragraph' &&
- nodes.slice(0, i).every((node) => node.type === 'list')
- ) {
- const text = textJoin(node.children[0].children, file);
- const stability = text.match(stabilityExpr);
- if (stability) {
- current.stability = parseInt(stability[1], 10);
- current.stabilityText = stability[2].trim();
- delete nodes[i];
- }
+ // Immediately after a heading, we can expect the following:
+ //
+ // { type: 'blockquote_start' },
+ // { type: 'paragraph', text: 'Stability: ...' },
+ // { type: 'blockquote_end' },
+ //
+ // A list: starting with list_start, ending with list_end,
+ // maybe containing other nested lists in each item.
+ //
+ // A metadata:
+ // <!-- YAML
+ // added: v1.0.0
+ // -->
+ //
+ // If one of these isn't found, then anything that comes
+ // between here and the next heading should be parsed as the desc.
+ if (state === 'AFTERHEADING') {
+ if (type === 'blockquote_start') {
+ state = 'AFTERHEADING_BLOCKQUOTE';
+ return;
+ } else if (type === 'list_start' && !tok.ordered) {
+ state = 'AFTERHEADING_LIST';
+ current.list = current.list || [];
+ current.list.push(tok);
+ current.list.level = 1;
+ } else if (type === 'html' && common.isYAMLBlock(tok.text)) {
+ current.meta = common.extractAndParseYAML(tok.text);
+ } else {
+ current.desc = current.desc || [];
+ if (!Array.isArray(current.desc)) {
+ current.shortDesc = current.desc;
+ current.desc = [];
}
- });
-
- // Compress the node array.
- nodes = nodes.filter(() => true);
-
- // If the first node is a list, extract it.
- const list = nodes[0] && nodes[0].type === 'list' ?
- nodes.shift() : null;
-
- // Now figure out what this list actually means.
- // Depending on the section type, the list could be different things.
- const values = list ?
- list.children.map((child) => parseListItem(child, file)) : [];
-
- switch (current.type) {
- case 'ctor':
- case 'classMethod':
- case 'method':
- // Each item is an argument, unless the name is 'return',
- // in which case it's the return value.
- const sig = {};
- sig.params = values.filter((value) => {
- if (value.name === 'return') {
- sig.return = value;
- return false;
- }
- return true;
- });
- parseSignature(current.textRaw, sig);
- current.signatures = [sig];
- break;
-
- case 'property':
- // There should be only one item, which is the value.
- // Copy the data up to the section.
- if (values.length) {
- const signature = values[0];
-
- // Shove the name in there for properties,
- // since they are always just going to be the value etc.
- signature.textRaw = `\`${current.name}\` ${signature.textRaw}`;
-
- for (const key in signature) {
- if (signature[key]) {
- if (key === 'type') {
- current.typeof = signature.type;
- } else {
- current[key] = signature[key];
- }
- }
- }
- }
- break;
-
- case 'event':
- // Event: each item is an argument.
- current.params = values;
- break;
-
- default:
- // If list wasn't consumed, put it back in the nodes list.
- if (list) nodes.unshift(list);
+ current.desc.links = lexed.links;
+ current.desc.push(tok);
+ state = 'DESC';
}
+ return;
+ }
- // Convert remaining nodes to a 'desc'.
- // Unified expects to process a string; but we ignore that as we
- // already have pre-parsed input that we can inject.
- if (nodes.length) {
- if (current.desc) current.shortDesc = current.desc;
-
- current.desc = unified()
- .use(function() {
- this.Parser = () => (
- { type: 'root', children: nodes.concat(definitions) }
- );
- })
- .use(html)
- .processSync('').toString().trim();
- if (!current.desc) delete current.desc;
+ if (state === 'AFTERHEADING_LIST') {
+ current.list.push(tok);
+ if (type === 'list_start') {
+ current.list.level++;
+ } else if (type === 'list_end') {
+ current.list.level--;
+ }
+ if (current.list.level === 0) {
+ state = 'AFTERHEADING';
+ processList(current);
}
+ return;
+ }
- // Process subsections.
- while (sections.length > 0 && sections[0].depth > section.depth) {
- doSection(sections.shift(), current);
+ if (state === 'AFTERHEADING_BLOCKQUOTE') {
+ if (type === 'blockquote_end') {
+ state = 'AFTERHEADING';
+ return;
}
- // If type is not set, default type based on parent type, and
- // set displayName and name properties.
- if (!current.type) {
- current.type = (parent.type === 'misc' ? 'misc' : 'module');
- current.displayName = current.name;
- current.name = current.name.toLowerCase()
- .trim().replace(/\s+/g, '_');
+ let stability;
+ if (type === 'paragraph' && (stability = text.match(stabilityExpr))) {
+ current.stability = parseInt(stability[1], 10);
+ current.stabilityText = stability[2].trim();
+ return;
}
+ }
+
+ current.desc = current.desc || [];
+ current.desc.links = lexed.links;
+ current.desc.push(tok);
+ });
- // Pluralize type to determine which 'bucket' to put this section in.
- let plur;
- if (current.type.slice(-1) === 's') {
- plur = `${current.type}es`;
- } else if (current.type.slice(-1) === 'y') {
- plur = current.type.replace(/y$/, 'ies');
+ // Finish any sections left open.
+ while (root !== (current = stack.pop())) {
+ finishSection(current, stack[stack.length - 1]);
+ }
+
+ return cb(null, root);
+}
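+// Illustrative output shape (hypothetical one-heading input "# fs"):
+//   { source: 'fs.md',
+//     modules: [ { textRaw: 'fs', name: 'fs', type: 'module',
+//                  displayName: 'fs' } ] }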
+
+
+// Go from something like this:
+//
+// [ { type: "list_item_start" },
+// { type: "text",
+// text: "`options` {Object|string}" },
+// { type: "list_start",
+// ordered: false },
+// { type: "list_item_start" },
+// { type: "text",
+// text: "`encoding` {string|null} **Default:** `'utf8'`" },
+// { type: "list_item_end" },
+// { type: "list_item_start" },
+// { type: "text",
+// text: "`mode` {integer} **Default:** `0o666`" },
+// { type: "list_item_end" },
+// { type: "list_item_start" },
+// { type: "text",
+// text: "`flag` {string} **Default:** `'a'`" },
+// { type: "space" },
+// { type: "list_item_end" },
+// { type: "list_end" },
+// { type: "list_item_end" } ]
+//
+// to something like:
+//
+// [ { textRaw: "`options` {Object|string} ",
+// options: [
+// { textRaw: "`encoding` {string|null} **Default:** `'utf8'` ",
+// name: "encoding",
+// type: "string|null",
+// default: "`'utf8'`" },
+// { textRaw: "`mode` {integer} **Default:** `0o666` ",
+// name: "mode",
+// type: "integer",
+// default: "`0o666`" },
+// { textRaw: "`flag` {string} **Default:** `'a'` ",
+// name: "flag",
+// type: "string",
+// default: "`'a'`" } ],
+// name: "options",
+// type: "Object|string",
+// optional: true } ]
+
+function processList(section) {
+ const { list } = section;
+ const values = [];
+ const stack = [];
+ let current;
+
+ // For now, *just* build the hierarchical list.
+ list.forEach((tok) => {
+ const { type } = tok;
+ if (type === 'space') return;
+ if (type === 'list_item_start' || type === 'loose_item_start') {
+ const item = {};
+ if (!current) {
+ values.push(item);
+ current = item;
} else {
- plur = `${current.type}s`;
+ current.options = current.options || [];
+ stack.push(current);
+ current.options.push(item);
+ current = item;
}
-
- // Classes sometimes have various 'ctor' children
- // which are actually just descriptions of a constructor class signature.
- // Merge them into the parent.
- if (current.type === 'class' && current.ctors) {
- current.signatures = current.signatures || [];
- const sigs = current.signatures;
- current.ctors.forEach((ctor) => {
- ctor.signatures = ctor.signatures || [{}];
- ctor.signatures.forEach((sig) => {
- sig.desc = ctor.desc;
- });
- sigs.push(...ctor.signatures);
- });
- delete current.ctors;
+ } else if (type === 'list_item_end') {
+ if (!current) {
+ throw new Error('invalid list - end without current item\n' +
+ `${JSON.stringify(tok)}\n` +
+ JSON.stringify(list));
}
+ current = stack.pop();
+ } else if (type === 'text') {
+ if (!current) {
+ throw new Error('invalid list - text without current item\n' +
+ `${JSON.stringify(tok)}\n` +
+ JSON.stringify(list));
+ }
+ current.textRaw = `${current.textRaw || ''}${tok.text} `;
+ }
+ });
- // Properties are a bit special.
- // Their "type" is the type of object, not "property".
- if (current.type === 'property') {
- if (current.typeof) {
- current.type = current.typeof;
- delete current.typeof;
- } else {
- delete current.type;
+ // Shove the name in there for properties,
+ // since they are always just going to be the value etc.
+ if (section.type === 'property' && values[0]) {
+ values[0].textRaw = `\`${section.name}\` ${values[0].textRaw}`;
+ }
+
+ // Now pull the actual values out of the text bits.
+ values.forEach(parseListItem);
+
+ // Now figure out what this list actually means.
+ // Depending on the section type, the list could be different things.
+
+ switch (section.type) {
+ case 'ctor':
+ case 'classMethod':
+ case 'method': {
+ // Each item is an argument, unless the name is 'return',
+ // in which case it's the return value.
+ const sig = {};
+ section.signatures = section.signatures || [];
+ sig.params = values.filter((value) => {
+ if (value.name === 'return') {
+ sig.return = value;
+ return false;
}
- }
+ return true;
+ });
+ parseSignature(section.textRaw, sig);
+ if (!sig.jump) section.signatures.push(sig);
+ break;
+ }
- // If the parent's type is 'misc', then it's just a random
- // collection of stuff, like the "globals" section.
- // Make the children top-level items.
- if (current.type === 'misc') {
- Object.keys(current).forEach((key) => {
- switch (key) {
- case 'textRaw':
- case 'name':
- case 'type':
- case 'desc':
- case 'miscs':
- return;
- default:
- if (parent.type === 'misc') {
- return;
- }
- if (parent[key] && Array.isArray(parent[key])) {
- parent[key] = parent[key].concat(current[key]);
- } else if (!parent[key]) {
- parent[key] = current[key];
- }
- }
- });
- }
+ case 'property': {
+ // There should be only one item, which is the value.
+ // Copy the data up to the section.
+ const value = values[0] || {};
+ delete value.name;
+ section.typeof = value.type || section.typeof;
+ delete value.type;
+ Object.keys(value).forEach((key) => {
+ section[key] = value[key];
+ });
+ break;
+ }
- // Add this section to the parent. Sometimes we have two headings with a
- // single blob of description. If the preceding entry at this level
- // shares a name and is lacking a description, copy it backwards.
- if (!parent[plur]) parent[plur] = [];
- const prev = parent[plur].slice(-1)[0];
- if (prev && prev.name === current.name && !prev.desc) {
- prev.desc = current.desc;
+ case 'event':
+ // Event: each item is an argument.
+ section.params = values;
+ break;
+
+ default:
+ if (section.list.length > 0) {
+ section.desc = section.desc || [];
+ section.desc.push(...section.list);
}
- parent[plur].push(current);
- }
- };
+ }
+
+ delete section.list;
}
// text: "someobject.someMethod(a[, b=100][, c])"
function parseSignature(text, sig) {
const list = [];
-
let [, sigParams] = text.match(paramExpr) || [];
if (!sigParams) return;
sigParams = sigParams.split(',');
}
if (!listParam) {
- if (sigParam.startsWith('...')) {
- listParam = { name: sigParam };
- } else {
- throw new Error(
- `Invalid param "${sigParam}"\n` +
- ` > ${JSON.stringify(listParam)}\n` +
- ` > ${text}`
- );
- }
+ sig.jump = true;
+ return;
}
}
list.push(listParam);
});
-
sig.params = list;
}
const leadingHyphen = /^-\s*/;
const defaultExpr = /\s*\*\*Default:\*\*\s*([^]+)$/i;
-function parseListItem(item, file) {
- const current = {};
-
- current.textRaw = item.children.filter((node) => node.type !== 'list')
- .map((node) => (
- file.contents.slice(node.position.start.offset, node.position.end.offset))
- )
- .join('').replace(/\s+/g, ' ').replace(/<!--.*?-->/sg, '');
- let text = current.textRaw;
-
- if (!text) {
+function parseListItem(item) {
+ if (item.options) item.options.forEach(parseListItem);
+ if (!item.textRaw) {
throw new Error(`Empty list item: ${JSON.stringify(item)}`);
}
- // The goal here is to find the name, type, default.
+ // The goal here is to find the name, type, default, and optional.
// Anything left over is 'desc'.
+ let text = item.textRaw.trim();
if (returnExpr.test(text)) {
- current.name = 'return';
+ item.name = 'return';
text = text.replace(returnExpr, '');
} else {
const [, name] = text.match(nameExpr) || [];
if (name) {
- current.name = name;
+ item.name = name;
text = text.replace(nameExpr, '');
}
}
const [, type] = text.match(typeExpr) || [];
if (type) {
- current.type = type;
+ item.type = type;
text = text.replace(typeExpr, '');
}
const [, defaultValue] = text.match(defaultExpr) || [];
if (defaultValue) {
- current.default = defaultValue.replace(/\.$/, '');
+ item.default = defaultValue.replace(/\.$/, '');
text = text.replace(defaultExpr, '');
}
- if (text) current.desc = text;
+ if (text) item.desc = text;
+}
+
+
+function finishSection(section, parent) {
+ if (!section || !parent) {
+ throw new Error('Invalid finishSection call\n' +
+ `${JSON.stringify(section)}\n` +
+ JSON.stringify(parent));
+ }
+
+ if (!section.type) {
+ section.type = 'module';
+ if (parent.type === 'misc') {
+ section.type = 'misc';
+ }
+ section.displayName = section.name;
+ section.name = section.name.toLowerCase()
+ .trim().replace(/\s+/g, '_');
+ }
+
+ if (section.desc && Array.isArray(section.desc)) {
+ section.desc.links = section.desc.links || [];
+ section.desc = marked.parser(section.desc);
+ }
+
+ if (!section.list) section.list = [];
+ processList(section);
+
+ // Classes sometimes have various 'ctor' children
+ // which are actually just descriptions of a constructor class signature.
+ // Merge them into the parent.
+ if (section.type === 'class' && section.ctors) {
+ section.signatures = section.signatures || [];
+ const sigs = section.signatures;
+ section.ctors.forEach((ctor) => {
+ ctor.signatures = ctor.signatures || [{}];
+ ctor.signatures.forEach((sig) => {
+ sig.desc = ctor.desc;
+ });
+ sigs.push(...ctor.signatures);
+ });
+ delete section.ctors;
+ }
+
+ // Properties are a bit special.
+ // Their "type" is the type of object, not "property".
+ if (section.properties) {
+ section.properties.forEach((prop) => {
+ if (prop.typeof) {
+ prop.type = prop.typeof;
+ delete prop.typeof;
+ } else {
+ delete prop.type;
+ }
+ });
+ }
+
+ // Handle clones.
+ if (section.clone) {
+ const { clone } = section;
+ delete section.clone;
+ delete clone.clone;
+ deepCopy(section, clone);
+ finishSection(clone, parent);
+ }
- const options = item.children.find((child) => child.type === 'list');
- if (options) {
- current.options = options.children.map((child) => (
- parseListItem(child, file)
- ));
+ let plur;
+ if (section.type.slice(-1) === 's') {
+ plur = `${section.type}es`;
+ } else if (section.type.slice(-1) === 'y') {
+ plur = section.type.replace(/y$/, 'ies');
+ } else {
+ plur = `${section.type}s`;
}
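+  // e.g. 'class' -> 'classes', 'property' -> 'properties', 'event' -> 'events'.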
- return current;
+ // If the parent's type is 'misc', then it's just a random
+ // collection of stuff, like the "globals" section.
+ // Make the children top-level items.
+ if (section.type === 'misc') {
+ Object.keys(section).forEach((key) => {
+ switch (key) {
+ case 'textRaw':
+ case 'name':
+ case 'type':
+ case 'desc':
+ case 'miscs':
+ return;
+ default:
+ if (parent.type === 'misc') {
+ return;
+ }
+ if (parent[key] && Array.isArray(parent[key])) {
+ parent[key] = parent[key].concat(section[key]);
+ } else if (!parent[key]) {
+ parent[key] = section[key];
+ }
+ }
+ });
+ }
+
+ parent[plur] = parent[plur] || [];
+ parent[plur].push(section);
}
-// This section parses out the contents of an H# tag.
-// To reduce escape slashes in RegExp string components.
+// Not a general purpose deep copy.
+// But sufficient for these basic things.
+function deepCopy(src, dest) {
+ Object.keys(src)
+ .filter((key) => !dest.hasOwnProperty(key))
+ .forEach((key) => { dest[key] = cloneValue(src[key]); });
+}
+
+function cloneValue(src) {
+ if (!src) return src;
+ if (Array.isArray(src)) {
+ const clone = new Array(src.length);
+ src.forEach((value, i) => {
+ clone[i] = cloneValue(value);
+ });
+ return clone;
+ }
+ if (typeof src === 'object') {
+ const clone = {};
+ Object.keys(src).forEach((key) => {
+ clone[key] = cloneValue(src[key]);
+ });
+ return clone;
+ }
+ return src;
+}
+
+
+// This section parses out the contents of an H# tag.
+
+// To reduce escape slashes in RegExp string components.
const r = String.raw;
const eventPrefix = '^Event: +';
];
/* eslint-enable max-len */
-function newSection(header, file) {
- const text = textJoin(header.children, file);
-
+function newSection({ text }) {
// Infer the type from the text.
for (const { type, re } of headingExpressions) {
const [, name] = text.match(re) || [];
return { textRaw: text, name: text };
}
-function textJoin(nodes, file) {
- return nodes.map((node) => {
- if (node.type === 'linkReference') {
- return file.contents.slice(node.position.start.offset,
- node.position.end.offset);
- } else if (node.type === 'inlineCode') {
- return `\`${node.value}\``;
- } else if (node.type === 'strong') {
- return `**${textJoin(node.children, file)}**`;
- } else if (node.type === 'emphasis') {
- return `_${textJoin(node.children, file)}_`;
- } else if (node.children) {
- return textJoin(node.children, file);
- }
- return node.value;
- }).join('');
-}
};
const arrayPart = /(?:\[])+$/;
+const arrayWrap = /^\[(\w+)\]$/;
function toLink(typeInput) {
const typeLinks = [];
typeInput = typeInput.replace('{', '').replace('}', '');
- const typeTexts = typeInput.split('|');
+ const typeTexts = typeInput.split(/\||,/);
typeTexts.forEach((typeText) => {
typeText = typeText.trim();
// To support type[], type[][] etc., we store the full string
// and use the bracket-less version to lookup the type URL.
const typeTextFull = typeText;
- typeText = typeText.replace(arrayPart, '');
+ typeText = typeText.replace(arrayPart, '').replace(arrayWrap, '$1');
const primitive = jsPrimitives[typeText];
changelog = readFileSync(file, { encoding: 'utf8' });
} else {
try {
- changelog = await getUrl(url);
+      changelog = await require('fs').promises.readFile(
+        require('path').join(__dirname, '..', '..', 'CHANGELOG.md'), 'utf8');
} catch (e) {
// Fail if this is a release build, otherwise fallback to local files.
if (isRelease()) {