Diffstat (limited to 'node_modules/markdown-it/lib/rules_inline')
 node_modules/markdown-it/lib/rules_inline/autolink.js       |  76 +
 node_modules/markdown-it/lib/rules_inline/backticks.js      |  63 +
 node_modules/markdown-it/lib/rules_inline/balance_pairs.js  | 130 +
 node_modules/markdown-it/lib/rules_inline/emphasis.js       | 130 +
 node_modules/markdown-it/lib/rules_inline/entity.js         |  48 +
 node_modules/markdown-it/lib/rules_inline/escape.js         |  52 +
 node_modules/markdown-it/lib/rules_inline/html_inline.js    |  47 +
 node_modules/markdown-it/lib/rules_inline/image.js          | 152 +
 node_modules/markdown-it/lib/rules_inline/link.js           | 148 +
 node_modules/markdown-it/lib/rules_inline/newline.js        |  46 +
 node_modules/markdown-it/lib/rules_inline/state_inline.js   | 154 +
 node_modules/markdown-it/lib/rules_inline/strikethrough.js  | 130 +
 node_modules/markdown-it/lib/rules_inline/text.js           |  89 +
 node_modules/markdown-it/lib/rules_inline/text_collapse.js  |  41 +
14 files changed, 1306 insertions, 0 deletions
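
The files below are markdown-it's inline tokenizer rules: autolinks, backtick code spans, emphasis/strikethrough delimiters, HTML entities, escapes, inline HTML, links, images, newlines, plain text, and the shared StateInline parser state. A minimal sketch of how this rule chain is exercised through markdown-it's public API (the expected output comments are approximate):

'use strict';

// html: true enables the html_inline rule shown below; the rest run by default.
var md = require('markdown-it')({ html: true });

// renderInline() runs only the inline rule chain defined in rules_inline/,
// without wrapping the result in a block-level <p>.
console.log(md.renderInline('*em*, `code`, <https://example.com>, ~~strike~~'));
// roughly: <em>em</em>, <code>code</code>,
//          <a href="https://example.com">https://example.com</a>, <s>strike</s>

// parseInline() exposes the token stream that the rules build via state.push().
var blocks = md.parseInline('a *b* c', {});
console.log(blocks[0].children.map(function (t) { return t.type; }));
// roughly: [ 'text', 'em_open', 'text', 'em_close', 'text' ]
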
diff --git a/node_modules/markdown-it/lib/rules_inline/autolink.js b/node_modules/markdown-it/lib/rules_inline/autolink.js new file mode 100644 index 0000000..66deb90 --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/autolink.js @@ -0,0 +1,76 @@ +// Process autolinks '<protocol:...>' + +'use strict'; + + +/*eslint max-len:0*/ +var EMAIL_RE = /^([a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$/; +var AUTOLINK_RE = /^([a-zA-Z][a-zA-Z0-9+.\-]{1,31}):([^<>\x00-\x20]*)$/; + + +module.exports = function autolink(state, silent) { + var url, fullUrl, token, ch, start, max, + pos = state.pos; + + if (state.src.charCodeAt(pos) !== 0x3C/* < */) { return false; } + + start = state.pos; + max = state.posMax; + + for (;;) { + if (++pos >= max) return false; + + ch = state.src.charCodeAt(pos); + + if (ch === 0x3C /* < */) return false; + if (ch === 0x3E /* > */) break; + } + + url = state.src.slice(start + 1, pos); + + if (AUTOLINK_RE.test(url)) { + fullUrl = state.md.normalizeLink(url); + if (!state.md.validateLink(fullUrl)) { return false; } + + if (!silent) { + token = state.push('link_open', 'a', 1); + token.attrs = [ [ 'href', fullUrl ] ]; + token.markup = 'autolink'; + token.info = 'auto'; + + token = state.push('text', '', 0); + token.content = state.md.normalizeLinkText(url); + + token = state.push('link_close', 'a', -1); + token.markup = 'autolink'; + token.info = 'auto'; + } + + state.pos += url.length + 2; + return true; + } + + if (EMAIL_RE.test(url)) { + fullUrl = state.md.normalizeLink('mailto:' + url); + if (!state.md.validateLink(fullUrl)) { return false; } + + if (!silent) { + token = state.push('link_open', 'a', 1); + token.attrs = [ [ 'href', fullUrl ] ]; + token.markup = 'autolink'; + token.info = 'auto'; + + token = state.push('text', '', 0); + token.content = state.md.normalizeLinkText(url); + + token = state.push('link_close', 'a', -1); + token.markup = 'autolink'; + token.info = 'auto'; + } + + state.pos += url.length + 2; + return true; + } + + return false; +}; diff --git a/node_modules/markdown-it/lib/rules_inline/backticks.js b/node_modules/markdown-it/lib/rules_inline/backticks.js new file mode 100644 index 0000000..b9c9ddb --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/backticks.js @@ -0,0 +1,63 @@ +// Parse backticks + +'use strict'; + + +module.exports = function backtick(state, silent) { + var start, max, marker, token, matchStart, matchEnd, openerLength, closerLength, + pos = state.pos, + ch = state.src.charCodeAt(pos); + + if (ch !== 0x60/* ` */) { return false; } + + start = pos; + pos++; + max = state.posMax; + + // scan marker length + while (pos < max && state.src.charCodeAt(pos) === 0x60/* ` */) { pos++; } + + marker = state.src.slice(start, pos); + openerLength = marker.length; + + if (state.backticksScanned && (state.backticks[openerLength] || 0) <= start) { + if (!silent) state.pending += marker; + state.pos += openerLength; + return true; + } + + matchStart = matchEnd = pos; + + // Nothing found in the cache, scan until the end of the line (or until marker is found) + while ((matchStart = state.src.indexOf('`', matchEnd)) !== -1) { + matchEnd = matchStart + 1; + + // scan marker length + while (matchEnd < max && state.src.charCodeAt(matchEnd) === 0x60/* ` */) { matchEnd++; } + + closerLength = matchEnd - matchStart; + + if (closerLength === openerLength) { + // Found matching closer length. 
+ if (!silent) { + token = state.push('code_inline', 'code', 0); + token.markup = marker; + token.content = state.src.slice(pos, matchStart) + .replace(/\n/g, ' ') + .replace(/^ (.+) $/, '$1'); + } + state.pos = matchEnd; + return true; + } + + // Some different length found, put it in cache as upper limit of where closer can be found + state.backticks[closerLength] = matchStart; + } + + // Scanned through the end, didn't find anything + state.backticksScanned = true; + + if (!silent) state.pending += marker; + state.pos += openerLength; + return true; +}; diff --git a/node_modules/markdown-it/lib/rules_inline/balance_pairs.js b/node_modules/markdown-it/lib/rules_inline/balance_pairs.js new file mode 100644 index 0000000..4faad90 --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/balance_pairs.js @@ -0,0 +1,130 @@ +// For each opening emphasis-like marker find a matching closing one +// +'use strict'; + + +function processDelimiters(state, delimiters) { + var closerIdx, openerIdx, closer, opener, minOpenerIdx, newMinOpenerIdx, + isOddMatch, lastJump, + openersBottom = {}, + max = delimiters.length; + + if (!max) return; + + // headerIdx is the first delimiter of the current (where closer is) delimiter run + var headerIdx = 0; + var lastTokenIdx = -2; // needs any value lower than -1 + var jumps = []; + + for (closerIdx = 0; closerIdx < max; closerIdx++) { + closer = delimiters[closerIdx]; + + jumps.push(0); + + // markers belong to same delimiter run if: + // - they have adjacent tokens + // - AND markers are the same + // + if (delimiters[headerIdx].marker !== closer.marker || lastTokenIdx !== closer.token - 1) { + headerIdx = closerIdx; + } + + lastTokenIdx = closer.token; + + // Length is only used for emphasis-specific "rule of 3", + // if it's not defined (in strikethrough or 3rd party plugins), + // we can default it to 0 to disable those checks. + // + closer.length = closer.length || 0; + + if (!closer.close) continue; + + // Previously calculated lower bounds (previous fails) + // for each marker, each delimiter length modulo 3, + // and for whether this closer can be an opener; + // https://github.com/commonmark/cmark/commit/34250e12ccebdc6372b8b49c44fab57c72443460 + if (!openersBottom.hasOwnProperty(closer.marker)) { + openersBottom[closer.marker] = [ -1, -1, -1, -1, -1, -1 ]; + } + + minOpenerIdx = openersBottom[closer.marker][(closer.open ? 3 : 0) + (closer.length % 3)]; + + openerIdx = headerIdx - jumps[headerIdx] - 1; + + newMinOpenerIdx = openerIdx; + + for (; openerIdx > minOpenerIdx; openerIdx -= jumps[openerIdx] + 1) { + opener = delimiters[openerIdx]; + + if (opener.marker !== closer.marker) continue; + + if (opener.open && opener.end < 0) { + + isOddMatch = false; + + // from spec: + // + // If one of the delimiters can both open and close emphasis, then the + // sum of the lengths of the delimiter runs containing the opening and + // closing delimiters must not be a multiple of 3 unless both lengths + // are multiples of 3. + // + if (opener.close || closer.open) { + if ((opener.length + closer.length) % 3 === 0) { + if (opener.length % 3 !== 0 || closer.length % 3 !== 0) { + isOddMatch = true; + } + } + } + + if (!isOddMatch) { + // If previous delimiter cannot be an opener, we can safely skip + // the entire sequence in future checks. This is required to make + // sure algorithm has linear complexity (see *_*_*_*_*_... case). + // + lastJump = openerIdx > 0 && !delimiters[openerIdx - 1].open ? 
+ jumps[openerIdx - 1] + 1 : + 0; + + jumps[closerIdx] = closerIdx - openerIdx + lastJump; + jumps[openerIdx] = lastJump; + + closer.open = false; + opener.end = closerIdx; + opener.close = false; + newMinOpenerIdx = -1; + // treat next token as start of run, + // it optimizes skips in **<...>**a**<...>** pathological case + lastTokenIdx = -2; + break; + } + } + } + + if (newMinOpenerIdx !== -1) { + // If match for this delimiter run failed, we want to set lower bound for + // future lookups. This is required to make sure algorithm has linear + // complexity. + // + // See details here: + // https://github.com/commonmark/cmark/issues/178#issuecomment-270417442 + // + openersBottom[closer.marker][(closer.open ? 3 : 0) + ((closer.length || 0) % 3)] = newMinOpenerIdx; + } + } +} + + +module.exports = function link_pairs(state) { + var curr, + tokens_meta = state.tokens_meta, + max = state.tokens_meta.length; + + processDelimiters(state, state.delimiters); + + for (curr = 0; curr < max; curr++) { + if (tokens_meta[curr] && tokens_meta[curr].delimiters) { + processDelimiters(state, tokens_meta[curr].delimiters); + } + } +}; diff --git a/node_modules/markdown-it/lib/rules_inline/emphasis.js b/node_modules/markdown-it/lib/rules_inline/emphasis.js new file mode 100644 index 0000000..7e8ab4c --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/emphasis.js @@ -0,0 +1,130 @@ +// Process *this* and _that_ +// +'use strict'; + + +// Insert each marker as a separate text token, and add it to delimiter list +// +module.exports.tokenize = function emphasis(state, silent) { + var i, scanned, token, + start = state.pos, + marker = state.src.charCodeAt(start); + + if (silent) { return false; } + + if (marker !== 0x5F /* _ */ && marker !== 0x2A /* * */) { return false; } + + scanned = state.scanDelims(state.pos, marker === 0x2A); + + for (i = 0; i < scanned.length; i++) { + token = state.push('text', '', 0); + token.content = String.fromCharCode(marker); + + state.delimiters.push({ + // Char code of the starting marker (number). + // + marker: marker, + + // Total length of these series of delimiters. + // + length: scanned.length, + + // A position of the token this delimiter corresponds to. + // + token: state.tokens.length - 1, + + // If this delimiter is matched as a valid opener, `end` will be + // equal to its position, otherwise it's `-1`. + // + end: -1, + + // Boolean flags that determine if this delimiter could open or close + // an emphasis. + // + open: scanned.can_open, + close: scanned.can_close + }); + } + + state.pos += scanned.length; + + return true; +}; + + +function postProcess(state, delimiters) { + var i, + startDelim, + endDelim, + token, + ch, + isStrong, + max = delimiters.length; + + for (i = max - 1; i >= 0; i--) { + startDelim = delimiters[i]; + + if (startDelim.marker !== 0x5F/* _ */ && startDelim.marker !== 0x2A/* * */) { + continue; + } + + // Process only opening markers + if (startDelim.end === -1) { + continue; + } + + endDelim = delimiters[startDelim.end]; + + // If the previous delimiter has the same marker and is adjacent to this one, + // merge those into one strong delimiter. 
+ // + // `<em><em>whatever</em></em>` -> `<strong>whatever</strong>` + // + isStrong = i > 0 && + delimiters[i - 1].end === startDelim.end + 1 && + // check that first two markers match and adjacent + delimiters[i - 1].marker === startDelim.marker && + delimiters[i - 1].token === startDelim.token - 1 && + // check that last two markers are adjacent (we can safely assume they match) + delimiters[startDelim.end + 1].token === endDelim.token + 1; + + ch = String.fromCharCode(startDelim.marker); + + token = state.tokens[startDelim.token]; + token.type = isStrong ? 'strong_open' : 'em_open'; + token.tag = isStrong ? 'strong' : 'em'; + token.nesting = 1; + token.markup = isStrong ? ch + ch : ch; + token.content = ''; + + token = state.tokens[endDelim.token]; + token.type = isStrong ? 'strong_close' : 'em_close'; + token.tag = isStrong ? 'strong' : 'em'; + token.nesting = -1; + token.markup = isStrong ? ch + ch : ch; + token.content = ''; + + if (isStrong) { + state.tokens[delimiters[i - 1].token].content = ''; + state.tokens[delimiters[startDelim.end + 1].token].content = ''; + i--; + } + } +} + + +// Walk through delimiter list and replace text tokens with tags +// +module.exports.postProcess = function emphasis(state) { + var curr, + tokens_meta = state.tokens_meta, + max = state.tokens_meta.length; + + postProcess(state, state.delimiters); + + for (curr = 0; curr < max; curr++) { + if (tokens_meta[curr] && tokens_meta[curr].delimiters) { + postProcess(state, tokens_meta[curr].delimiters); + } + } +}; diff --git a/node_modules/markdown-it/lib/rules_inline/entity.js b/node_modules/markdown-it/lib/rules_inline/entity.js new file mode 100644 index 0000000..6fcc889 --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/entity.js @@ -0,0 +1,48 @@ +// Process html entity - {, ¯, ", ... + +'use strict'; + +var entities = require('../common/entities'); +var has = require('../common/utils').has; +var isValidEntityCode = require('../common/utils').isValidEntityCode; +var fromCodePoint = require('../common/utils').fromCodePoint; + + +var DIGITAL_RE = /^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));/i; +var NAMED_RE = /^&([a-z][a-z0-9]{1,31});/i; + + +module.exports = function entity(state, silent) { + var ch, code, match, pos = state.pos, max = state.posMax; + + if (state.src.charCodeAt(pos) !== 0x26/* & */) { return false; } + + if (pos + 1 < max) { + ch = state.src.charCodeAt(pos + 1); + + if (ch === 0x23 /* # */) { + match = state.src.slice(pos).match(DIGITAL_RE); + if (match) { + if (!silent) { + code = match[1][0].toLowerCase() === 'x' ? parseInt(match[1].slice(1), 16) : parseInt(match[1], 10); + state.pending += isValidEntityCode(code) ? 
fromCodePoint(code) : fromCodePoint(0xFFFD); + } + state.pos += match[0].length; + return true; + } + } else { + match = state.src.slice(pos).match(NAMED_RE); + if (match) { + if (has(entities, match[1])) { + if (!silent) { state.pending += entities[match[1]]; } + state.pos += match[0].length; + return true; + } + } + } + } + + if (!silent) { state.pending += '&'; } + state.pos++; + return true; +}; diff --git a/node_modules/markdown-it/lib/rules_inline/escape.js b/node_modules/markdown-it/lib/rules_inline/escape.js new file mode 100644 index 0000000..229ead0 --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/escape.js @@ -0,0 +1,52 @@ +// Process escaped chars and hardbreaks + +'use strict'; + +var isSpace = require('../common/utils').isSpace; + +var ESCAPED = []; + +for (var i = 0; i < 256; i++) { ESCAPED.push(0); } + +'\\!"#$%&\'()*+,./:;<=>?@[]^_`{|}~-' + .split('').forEach(function (ch) { ESCAPED[ch.charCodeAt(0)] = 1; }); + + +module.exports = function escape(state, silent) { + var ch, pos = state.pos, max = state.posMax; + + if (state.src.charCodeAt(pos) !== 0x5C/* \ */) { return false; } + + pos++; + + if (pos < max) { + ch = state.src.charCodeAt(pos); + + if (ch < 256 && ESCAPED[ch] !== 0) { + if (!silent) { state.pending += state.src[pos]; } + state.pos += 2; + return true; + } + + if (ch === 0x0A) { + if (!silent) { + state.push('hardbreak', 'br', 0); + } + + pos++; + // skip leading whitespaces from next line + while (pos < max) { + ch = state.src.charCodeAt(pos); + if (!isSpace(ch)) { break; } + pos++; + } + + state.pos = pos; + return true; + } + } + + if (!silent) { state.pending += '\\'; } + state.pos++; + return true; +}; diff --git a/node_modules/markdown-it/lib/rules_inline/html_inline.js b/node_modules/markdown-it/lib/rules_inline/html_inline.js new file mode 100644 index 0000000..28c7980 --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/html_inline.js @@ -0,0 +1,47 @@ +// Process html tags + +'use strict'; + + +var HTML_TAG_RE = require('../common/html_re').HTML_TAG_RE; + + +function isLetter(ch) { + /*eslint no-bitwise:0*/ + var lc = ch | 0x20; // to lower case + return (lc >= 0x61/* a */) && (lc <= 0x7a/* z */); +} + + +module.exports = function html_inline(state, silent) { + var ch, match, max, token, + pos = state.pos; + + if (!state.md.options.html) { return false; } + + // Check start + max = state.posMax; + if (state.src.charCodeAt(pos) !== 0x3C/* < */ || + pos + 2 >= max) { + return false; + } + + // Quick fail on second char + ch = state.src.charCodeAt(pos + 1); + if (ch !== 0x21/* ! */ && + ch !== 0x3F/* ? 
*/ && + ch !== 0x2F/* / */ && + !isLetter(ch)) { + return false; + } + + match = state.src.slice(pos).match(HTML_TAG_RE); + if (!match) { return false; } + + if (!silent) { + token = state.push('html_inline', '', 0); + token.content = state.src.slice(pos, pos + match[0].length); + } + state.pos += match[0].length; + return true; +}; diff --git a/node_modules/markdown-it/lib/rules_inline/image.js b/node_modules/markdown-it/lib/rules_inline/image.js new file mode 100644 index 0000000..53edd32 --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/image.js @@ -0,0 +1,152 @@ +// Process ![image](<src> "title") + +'use strict'; + +var normalizeReference = require('../common/utils').normalizeReference; +var isSpace = require('../common/utils').isSpace; + + +module.exports = function image(state, silent) { + var attrs, + code, + content, + label, + labelEnd, + labelStart, + pos, + ref, + res, + title, + token, + tokens, + start, + href = '', + oldPos = state.pos, + max = state.posMax; + + if (state.src.charCodeAt(state.pos) !== 0x21/* ! */) { return false; } + if (state.src.charCodeAt(state.pos + 1) !== 0x5B/* [ */) { return false; } + + labelStart = state.pos + 2; + labelEnd = state.md.helpers.parseLinkLabel(state, state.pos + 1, false); + + // parser failed to find ']', so it's not a valid link + if (labelEnd < 0) { return false; } + + pos = labelEnd + 1; + if (pos < max && state.src.charCodeAt(pos) === 0x28/* ( */) { + // + // Inline link + // + + // [link]( <href> "title" ) + // ^^ skipping these spaces + pos++; + for (; pos < max; pos++) { + code = state.src.charCodeAt(pos); + if (!isSpace(code) && code !== 0x0A) { break; } + } + if (pos >= max) { return false; } + + // [link]( <href> "title" ) + // ^^^^^^ parsing link destination + start = pos; + res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax); + if (res.ok) { + href = state.md.normalizeLink(res.str); + if (state.md.validateLink(href)) { + pos = res.pos; + } else { + href = ''; + } + } + + // [link]( <href> "title" ) + // ^^ skipping these spaces + start = pos; + for (; pos < max; pos++) { + code = state.src.charCodeAt(pos); + if (!isSpace(code) && code !== 0x0A) { break; } + } + + // [link]( <href> "title" ) + // ^^^^^^^ parsing link title + res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax); + if (pos < max && start !== pos && res.ok) { + title = res.str; + pos = res.pos; + + // [link]( <href> "title" ) + // ^^ skipping these spaces + for (; pos < max; pos++) { + code = state.src.charCodeAt(pos); + if (!isSpace(code) && code !== 0x0A) { break; } + } + } else { + title = ''; + } + + if (pos >= max || state.src.charCodeAt(pos) !== 0x29/* ) */) { + state.pos = oldPos; + return false; + } + pos++; + } else { + // + // Link reference + // + if (typeof state.env.references === 'undefined') { return false; } + + if (pos < max && state.src.charCodeAt(pos) === 0x5B/* [ */) { + start = pos + 1; + pos = state.md.helpers.parseLinkLabel(state, pos); + if (pos >= 0) { + label = state.src.slice(start, pos++); + } else { + pos = labelEnd + 1; + } + } else { + pos = labelEnd + 1; + } + + // covers label === '' and label === undefined + // (collapsed reference link and shortcut reference link respectively) + if (!label) { label = state.src.slice(labelStart, labelEnd); } + + ref = state.env.references[normalizeReference(label)]; + if (!ref) { + state.pos = oldPos; + return false; + } + href = ref.href; + title = ref.title; + } + + // + // We found the end of the link, and know for a fact it's a valid link; + 
// so all that's left to do is to call tokenizer. + // + if (!silent) { + content = state.src.slice(labelStart, labelEnd); + + state.md.inline.parse( + content, + state.md, + state.env, + tokens = [] + ); + + token = state.push('image', 'img', 0); + token.attrs = attrs = [ [ 'src', href ], [ 'alt', '' ] ]; + token.children = tokens; + token.content = content; + + if (title) { + attrs.push([ 'title', title ]); + } + } + + state.pos = pos; + state.posMax = max; + return true; +}; diff --git a/node_modules/markdown-it/lib/rules_inline/link.js b/node_modules/markdown-it/lib/rules_inline/link.js new file mode 100644 index 0000000..1d242bf --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/link.js @@ -0,0 +1,148 @@ +// Process [link](<to> "stuff") + +'use strict'; + +var normalizeReference = require('../common/utils').normalizeReference; +var isSpace = require('../common/utils').isSpace; + + +module.exports = function link(state, silent) { + var attrs, + code, + label, + labelEnd, + labelStart, + pos, + res, + ref, + token, + href = '', + title = '', + oldPos = state.pos, + max = state.posMax, + start = state.pos, + parseReference = true; + + if (state.src.charCodeAt(state.pos) !== 0x5B/* [ */) { return false; } + + labelStart = state.pos + 1; + labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, true); + + // parser failed to find ']', so it's not a valid link + if (labelEnd < 0) { return false; } + + pos = labelEnd + 1; + if (pos < max && state.src.charCodeAt(pos) === 0x28/* ( */) { + // + // Inline link + // + + // might have found a valid shortcut link, disable reference parsing + parseReference = false; + + // [link]( <href> "title" ) + // ^^ skipping these spaces + pos++; + for (; pos < max; pos++) { + code = state.src.charCodeAt(pos); + if (!isSpace(code) && code !== 0x0A) { break; } + } + if (pos >= max) { return false; } + + // [link]( <href> "title" ) + // ^^^^^^ parsing link destination + start = pos; + res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax); + if (res.ok) { + href = state.md.normalizeLink(res.str); + if (state.md.validateLink(href)) { + pos = res.pos; + } else { + href = ''; + } + + // [link]( <href> "title" ) + // ^^ skipping these spaces + start = pos; + for (; pos < max; pos++) { + code = state.src.charCodeAt(pos); + if (!isSpace(code) && code !== 0x0A) { break; } + } + + // [link]( <href> "title" ) + // ^^^^^^^ parsing link title + res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax); + if (pos < max && start !== pos && res.ok) { + title = res.str; + pos = res.pos; + + // [link]( <href> "title" ) + // ^^ skipping these spaces + for (; pos < max; pos++) { + code = state.src.charCodeAt(pos); + if (!isSpace(code) && code !== 0x0A) { break; } + } + } + } + + if (pos >= max || state.src.charCodeAt(pos) !== 0x29/* ) */) { + // parsing a valid shortcut link failed, fallback to reference + parseReference = true; + } + pos++; + } + + if (parseReference) { + // + // Link reference + // + if (typeof state.env.references === 'undefined') { return false; } + + if (pos < max && state.src.charCodeAt(pos) === 0x5B/* [ */) { + start = pos + 1; + pos = state.md.helpers.parseLinkLabel(state, pos); + if (pos >= 0) { + label = state.src.slice(start, pos++); + } else { + pos = labelEnd + 1; + } + } else { + pos = labelEnd + 1; + } + + // covers label === '' and label === undefined + // (collapsed reference link and shortcut reference link respectively) + if (!label) { label = state.src.slice(labelStart, labelEnd); } + + ref = 
state.env.references[normalizeReference(label)]; + if (!ref) { + state.pos = oldPos; + return false; + } + href = ref.href; + title = ref.title; + } + + // + // We found the end of the link, and know for a fact it's a valid link; + // so all that's left to do is to call tokenizer. + // + if (!silent) { + state.pos = labelStart; + state.posMax = labelEnd; + + token = state.push('link_open', 'a', 1); + token.attrs = attrs = [ [ 'href', href ] ]; + if (title) { + attrs.push([ 'title', title ]); + } + + state.md.inline.tokenize(state); + + token = state.push('link_close', 'a', -1); + } + + state.pos = pos; + state.posMax = max; + return true; +}; diff --git a/node_modules/markdown-it/lib/rules_inline/newline.js b/node_modules/markdown-it/lib/rules_inline/newline.js new file mode 100644 index 0000000..9eeead4 --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/newline.js @@ -0,0 +1,46 @@ +// Proceess '\n' + +'use strict'; + +var isSpace = require('../common/utils').isSpace; + + +module.exports = function newline(state, silent) { + var pmax, max, ws, pos = state.pos; + + if (state.src.charCodeAt(pos) !== 0x0A/* \n */) { return false; } + + pmax = state.pending.length - 1; + max = state.posMax; + + // ' \n' -> hardbreak + // Lookup in pending chars is bad practice! Don't copy to other rules! + // Pending string is stored in concat mode, indexed lookups will cause + // convertion to flat mode. + if (!silent) { + if (pmax >= 0 && state.pending.charCodeAt(pmax) === 0x20) { + if (pmax >= 1 && state.pending.charCodeAt(pmax - 1) === 0x20) { + // Find whitespaces tail of pending chars. + ws = pmax - 1; + while (ws >= 1 && state.pending.charCodeAt(ws - 1) === 0x20) ws--; + + state.pending = state.pending.slice(0, ws); + state.push('hardbreak', 'br', 0); + } else { + state.pending = state.pending.slice(0, -1); + state.push('softbreak', 'br', 0); + } + + } else { + state.push('softbreak', 'br', 0); + } + } + + pos++; + + // skip heading spaces for next line + while (pos < max && isSpace(state.src.charCodeAt(pos))) { pos++; } + + state.pos = pos; + return true; +}; diff --git a/node_modules/markdown-it/lib/rules_inline/state_inline.js b/node_modules/markdown-it/lib/rules_inline/state_inline.js new file mode 100644 index 0000000..efbf9bd --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/state_inline.js @@ -0,0 +1,154 @@ +// Inline parser state + +'use strict'; + + +var Token = require('../token'); +var isWhiteSpace = require('../common/utils').isWhiteSpace; +var isPunctChar = require('../common/utils').isPunctChar; +var isMdAsciiPunct = require('../common/utils').isMdAsciiPunct; + + +function StateInline(src, md, env, outTokens) { + this.src = src; + this.env = env; + this.md = md; + this.tokens = outTokens; + this.tokens_meta = Array(outTokens.length); + + this.pos = 0; + this.posMax = this.src.length; + this.level = 0; + this.pending = ''; + this.pendingLevel = 0; + + // Stores { start: end } pairs. Useful for backtrack + // optimization of pairs parse (emphasis, strikes). 
+ this.cache = {}; + + // List of emphasis-like delimiters for current tag + this.delimiters = []; + + // Stack of delimiter lists for upper level tags + this._prev_delimiters = []; + + // backtick length => last seen position + this.backticks = {}; + this.backticksScanned = false; +} + + +// Flush pending text +// +StateInline.prototype.pushPending = function () { + var token = new Token('text', '', 0); + token.content = this.pending; + token.level = this.pendingLevel; + this.tokens.push(token); + this.pending = ''; + return token; +}; + + +// Push new token to "stream". +// If pending text exists - flush it as text token +// +StateInline.prototype.push = function (type, tag, nesting) { + if (this.pending) { + this.pushPending(); + } + + var token = new Token(type, tag, nesting); + var token_meta = null; + + if (nesting < 0) { + // closing tag + this.level--; + this.delimiters = this._prev_delimiters.pop(); + } + + token.level = this.level; + + if (nesting > 0) { + // opening tag + this.level++; + this._prev_delimiters.push(this.delimiters); + this.delimiters = []; + token_meta = { delimiters: this.delimiters }; + } + + this.pendingLevel = this.level; + this.tokens.push(token); + this.tokens_meta.push(token_meta); + return token; +}; + + +// Scan a sequence of emphasis-like markers, and determine whether +// it can start an emphasis sequence or end an emphasis sequence. +// +// - start - position to scan from (it should point at a valid marker); +// - canSplitWord - determine if these markers can be found inside a word +// +StateInline.prototype.scanDelims = function (start, canSplitWord) { + var pos = start, lastChar, nextChar, count, can_open, can_close, + isLastWhiteSpace, isLastPunctChar, + isNextWhiteSpace, isNextPunctChar, + left_flanking = true, + right_flanking = true, + max = this.posMax, + marker = this.src.charCodeAt(start); + + // treat beginning of the line as a whitespace + lastChar = start > 0 ? this.src.charCodeAt(start - 1) : 0x20; + + while (pos < max && this.src.charCodeAt(pos) === marker) { pos++; } + + count = pos - start; + + // treat end of the line as a whitespace + nextChar = pos < max ? 
this.src.charCodeAt(pos) : 0x20; + + isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar)); + isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar)); + + isLastWhiteSpace = isWhiteSpace(lastChar); + isNextWhiteSpace = isWhiteSpace(nextChar); + + if (isNextWhiteSpace) { + left_flanking = false; + } else if (isNextPunctChar) { + if (!(isLastWhiteSpace || isLastPunctChar)) { + left_flanking = false; + } + } + + if (isLastWhiteSpace) { + right_flanking = false; + } else if (isLastPunctChar) { + if (!(isNextWhiteSpace || isNextPunctChar)) { + right_flanking = false; + } + } + + if (!canSplitWord) { + can_open = left_flanking && (!right_flanking || isLastPunctChar); + can_close = right_flanking && (!left_flanking || isNextPunctChar); + } else { + can_open = left_flanking; + can_close = right_flanking; + } + + return { + can_open: can_open, + can_close: can_close, + length: count + }; +}; + + +// re-export Token class to use in block rules +StateInline.prototype.Token = Token; + + +module.exports = StateInline; diff --git a/node_modules/markdown-it/lib/rules_inline/strikethrough.js b/node_modules/markdown-it/lib/rules_inline/strikethrough.js new file mode 100644 index 0000000..3c35adf --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/strikethrough.js @@ -0,0 +1,130 @@ +// ~~strike through~~ +// +'use strict'; + + +// Insert each marker as a separate text token, and add it to delimiter list +// +module.exports.tokenize = function strikethrough(state, silent) { + var i, scanned, token, len, ch, + start = state.pos, + marker = state.src.charCodeAt(start); + + if (silent) { return false; } + + if (marker !== 0x7E/* ~ */) { return false; } + + scanned = state.scanDelims(state.pos, true); + len = scanned.length; + ch = String.fromCharCode(marker); + + if (len < 2) { return false; } + + if (len % 2) { + token = state.push('text', '', 0); + token.content = ch; + len--; + } + + for (i = 0; i < len; i += 2) { + token = state.push('text', '', 0); + token.content = ch + ch; + + state.delimiters.push({ + marker: marker, + length: 0, // disable "rule of 3" length checks meant for emphasis + token: state.tokens.length - 1, + end: -1, + open: scanned.can_open, + close: scanned.can_close + }); + } + + state.pos += scanned.length; + + return true; +}; + + +function postProcess(state, delimiters) { + var i, j, + startDelim, + endDelim, + token, + loneMarkers = [], + max = delimiters.length; + + for (i = 0; i < max; i++) { + startDelim = delimiters[i]; + + if (startDelim.marker !== 0x7E/* ~ */) { + continue; + } + + if (startDelim.end === -1) { + continue; + } + + endDelim = delimiters[startDelim.end]; + + token = state.tokens[startDelim.token]; + token.type = 's_open'; + token.tag = 's'; + token.nesting = 1; + token.markup = '~~'; + token.content = ''; + + token = state.tokens[endDelim.token]; + token.type = 's_close'; + token.tag = 's'; + token.nesting = -1; + token.markup = '~~'; + token.content = ''; + + if (state.tokens[endDelim.token - 1].type === 'text' && + state.tokens[endDelim.token - 1].content === '~') { + + loneMarkers.push(endDelim.token - 1); + } + } + + // If a marker sequence has an odd number of characters, it's splitted + // like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the + // start of the sequence. + // + // So, we have to move all those markers after subsequent s_close tags. 
+ // + while (loneMarkers.length) { + i = loneMarkers.pop(); + j = i + 1; + + while (j < state.tokens.length && state.tokens[j].type === 's_close') { + j++; + } + + j--; + + if (i !== j) { + token = state.tokens[j]; + state.tokens[j] = state.tokens[i]; + state.tokens[i] = token; + } + } +} + + +// Walk through delimiter list and replace text tokens with tags +// +module.exports.postProcess = function strikethrough(state) { + var curr, + tokens_meta = state.tokens_meta, + max = state.tokens_meta.length; + + postProcess(state, state.delimiters); + + for (curr = 0; curr < max; curr++) { + if (tokens_meta[curr] && tokens_meta[curr].delimiters) { + postProcess(state, tokens_meta[curr].delimiters); + } + } +}; diff --git a/node_modules/markdown-it/lib/rules_inline/text.js b/node_modules/markdown-it/lib/rules_inline/text.js new file mode 100644 index 0000000..b19591e --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/text.js @@ -0,0 +1,89 @@ +// Skip text characters for text token, place those to pending buffer +// and increment current pos + +'use strict'; + + +// Rule to skip pure text +// '{}$%@~+=:' reserved for extentions + +// !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~ + +// !!!! Don't confuse with "Markdown ASCII Punctuation" chars +// http://spec.commonmark.org/0.15/#ascii-punctuation-character +function isTerminatorChar(ch) { + switch (ch) { + case 0x0A/* \n */: + case 0x21/* ! */: + case 0x23/* # */: + case 0x24/* $ */: + case 0x25/* % */: + case 0x26/* & */: + case 0x2A/* * */: + case 0x2B/* + */: + case 0x2D/* - */: + case 0x3A/* : */: + case 0x3C/* < */: + case 0x3D/* = */: + case 0x3E/* > */: + case 0x40/* @ */: + case 0x5B/* [ */: + case 0x5C/* \ */: + case 0x5D/* ] */: + case 0x5E/* ^ */: + case 0x5F/* _ */: + case 0x60/* ` */: + case 0x7B/* { */: + case 0x7D/* } */: + case 0x7E/* ~ */: + return true; + default: + return false; + } +} + +module.exports = function text(state, silent) { + var pos = state.pos; + + while (pos < state.posMax && !isTerminatorChar(state.src.charCodeAt(pos))) { + pos++; + } + + if (pos === state.pos) { return false; } + + if (!silent) { state.pending += state.src.slice(state.pos, pos); } + + state.pos = pos; + + return true; +}; + +// Alternative implementation, for memory. +// +// It costs 10% of performance, but allows extend terminators list, if place it +// to `ParcerInline` property. Probably, will switch to it sometime, such +// flexibility required. 
+ +/* +var TERMINATOR_RE = /[\n!#$%&*+\-:<=>@[\\\]^_`{}~]/; + +module.exports = function text(state, silent) { + var pos = state.pos, + idx = state.src.slice(pos).search(TERMINATOR_RE); + + // first char is terminator -> empty text + if (idx === 0) { return false; } + + // no terminator -> text till end of string + if (idx < 0) { + if (!silent) { state.pending += state.src.slice(pos); } + state.pos = state.src.length; + return true; + } + + if (!silent) { state.pending += state.src.slice(pos, pos + idx); } + + state.pos += idx; + + return true; +};*/ diff --git a/node_modules/markdown-it/lib/rules_inline/text_collapse.js b/node_modules/markdown-it/lib/rules_inline/text_collapse.js new file mode 100644 index 0000000..390b0fe --- /dev/null +++ b/node_modules/markdown-it/lib/rules_inline/text_collapse.js @@ -0,0 +1,41 @@ +// Clean up tokens after emphasis and strikethrough postprocessing: +// merge adjacent text nodes into one and re-calculate all token levels +// +// This is necessary because initially emphasis delimiter markers (*, _, ~) +// are treated as their own separate text tokens. Then emphasis rule either +// leaves them as text (needed to merge with adjacent text) or turns them +// into opening/closing tags (which messes up levels inside). +// +'use strict'; + + +module.exports = function text_collapse(state) { + var curr, last, + level = 0, + tokens = state.tokens, + max = state.tokens.length; + + for (curr = last = 0; curr < max; curr++) { + // re-calculate levels after emphasis/strikethrough turns some text nodes + // into opening/closing tags + if (tokens[curr].nesting < 0) level--; // closing tag + tokens[curr].level = level; + if (tokens[curr].nesting > 0) level++; // opening tag + + if (tokens[curr].type === 'text' && + curr + 1 < max && + tokens[curr + 1].type === 'text') { + + // collapse two adjacent text nodes + tokens[curr + 1].content = tokens[curr].content + tokens[curr + 1].content; + } else { + if (curr !== last) { tokens[last] = tokens[curr]; } + + last++; + } + } + + if (curr !== last) { + tokens.length = last; + } +}; |
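
Every rule in this diff follows the same contract: a function (state, silent) that inspects state.src at state.pos, returns false when it does not match, and otherwise pushes tokens with state.push() (unless silent), advances state.pos, and returns true. Below is a simplified sketch of a third-party rule written against that contract; the '++inserted++' marker, the rule name, and the expected output are illustrative assumptions, and a production rule would use state.scanDelims() and the delimiter list the way emphasis and strikethrough do.

'use strict';

var md = require('markdown-it')();

// Hypothetical rule: turn ++text++ into <ins>text</ins>.
function insert_rule(state, silent) {
  var token, end,
      start = state.pos,
      max = state.posMax;

  if (state.src.charCodeAt(start) !== 0x2B/* + */) { return false; }
  if (state.src.charCodeAt(start + 1) !== 0x2B/* + */) { return false; }

  // find the closing '++' inside the current inline range
  end = state.src.indexOf('++', start + 2);
  if (end < 0 || end + 2 > max) { return false; }

  if (!silent) {
    state.push('ins_open', 'ins', 1);

    token = state.push('text', '', 0);
    token.content = state.src.slice(start + 2, end);

    state.push('ins_close', 'ins', -1);
  }

  state.pos = end + 2;
  return true;
}

// Hook the rule into the inline chain; Ruler supports before/after/push.
md.inline.ruler.before('emphasis', 'insert_rule', insert_rule);

console.log(md.renderInline('this is ++new++ text'));
// expected (roughly): this is <ins>new</ins> text
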