summaryrefslogtreecommitdiff
path: root/node_modules/markdown-it/lib/parser_inline.js
diff options
context:
space:
mode:
author    Minteck <contact@minteck.org>    2022-01-20 13:43:34 +0100
committer Minteck <contact@minteck.org>    2022-01-20 13:43:34 +0100
commitc2aa7bf38fb30de2d04f87f8e7780e4c768ae6b1 (patch)
tree226598e8d17d20e3721358f7c60b1cc6b851163a /node_modules/markdown-it/lib/parser_inline.js
downloadcobalt-c2aa7bf38fb30de2d04f87f8e7780e4c768ae6b1.tar.gz
cobalt-c2aa7bf38fb30de2d04f87f8e7780e4c768ae6b1.tar.bz2
cobalt-c2aa7bf38fb30de2d04f87f8e7780e4c768ae6b1.zip
Initial commit
Diffstat (limited to 'node_modules/markdown-it/lib/parser_inline.js')
-rw-r--r--  node_modules/markdown-it/lib/parser_inline.js  177
1 file changed, 177 insertions, 0 deletions
diff --git a/node_modules/markdown-it/lib/parser_inline.js b/node_modules/markdown-it/lib/parser_inline.js
new file mode 100644
index 0000000..c8e66d3
--- /dev/null
+++ b/node_modules/markdown-it/lib/parser_inline.js
@@ -0,0 +1,177 @@
+/** internal
+ * class ParserInline
+ *
+ * Tokenizes paragraph content.
+ **/
+'use strict';
+
+
+var Ruler = require('./ruler');
+
+
////////////////////////////////////////////////////////////////////////////////
// Parser rules

// First-pass tokenizer rules, tried in order at each input position by
// `tokenize()`/`skipToken()`. Each entry is [ ruleName, ruleFn ]; the order
// is significant — earlier rules get the first chance to consume input.
var _rules = [
  [ 'text',            require('./rules_inline/text') ],
  [ 'newline',         require('./rules_inline/newline') ],
  [ 'escape',          require('./rules_inline/escape') ],
  [ 'backticks',       require('./rules_inline/backticks') ],
  [ 'strikethrough',   require('./rules_inline/strikethrough').tokenize ],
  [ 'emphasis',        require('./rules_inline/emphasis').tokenize ],
  [ 'link',            require('./rules_inline/link') ],
  [ 'image',           require('./rules_inline/image') ],
  [ 'autolink',        require('./rules_inline/autolink') ],
  [ 'html_inline',     require('./rules_inline/html_inline') ],
  [ 'entity',          require('./rules_inline/entity') ]
];

// Second-pass (post-processing) rules, run once over the finished token
// stream by `parse()` — e.g. pairing up emphasis/strikethrough delimiters
// and collapsing adjacent text tokens.
var _rules2 = [
  [ 'balance_pairs',   require('./rules_inline/balance_pairs') ],
  [ 'strikethrough',   require('./rules_inline/strikethrough').postProcess ],
  [ 'emphasis',        require('./rules_inline/emphasis').postProcess ],
  [ 'text_collapse',   require('./rules_inline/text_collapse') ]
];
+
+
/**
 * new ParserInline()
 *
 * Builds the two rule chains: `ruler` (tokenizer rules) and `ruler2`
 * (post-processing rules), populated from the module-level rule tables.
 **/
function ParserInline() {
  /**
   * ParserInline#ruler -> Ruler
   *
   * [[Ruler]] instance. Keep configuration of inline rules.
   **/
  this.ruler = new Ruler();

  _rules.forEach(function (rule) {
    this.ruler.push(rule[0], rule[1]);
  }, this);

  /**
   * ParserInline#ruler2 -> Ruler
   *
   * [[Ruler]] instance. Second ruler used for post-processing
   * (e.g. in emphasis-like rules).
   **/
  this.ruler2 = new Ruler();

  _rules2.forEach(function (rule) {
    this.ruler2.push(rule[0], rule[1]);
  }, this);
}
+
+
// Skip a single token by running all rules in validation mode; advances
// `state.pos` past the token. Results are memoized in `state.cache`,
// keyed by the starting position. (Despite appearances it returns
// `undefined` — success is communicated solely through `state.pos`.)
//
ParserInline.prototype.skipToken = function (state) {
  var ok, i, pos = state.pos,
      rules = this.ruler.getRules(''),
      len = rules.length,
      maxNesting = state.md.options.maxNesting,
      cache = state.cache;


  // A previous call already computed where the token at `pos` ends —
  // reuse the memoized end position instead of re-running the rules.
  if (typeof cache[pos] !== 'undefined') {
    state.pos = cache[pos];
    return;
  }

  if (state.level < maxNesting) {
    for (i = 0; i < len; i++) {
      // Increment state.level and decrement it later to limit recursion.
      // It's harmless to do here, because no tokens are created. But ideally,
      // we'd need a separate private state variable for this purpose.
      //
      state.level++;
      ok = rules[i](state, true); // `true` = validation mode, no tokens emitted
      state.level--;

      if (ok) { break; }
    }
  } else {
    // Too much nesting, just skip until the end of the paragraph.
    //
    // NOTE: this will cause links to behave incorrectly in the following case,
    // when an amount of `[` is exactly equal to `maxNesting + 1`:
    //
    // [[[[[[[[[[[[[[[[[[[[[foo]()
    //
    // TODO: remove this workaround when CM standard will allow nested links
    // (we can replace it by preventing links from being parsed in
    // validation mode)
    //
    state.pos = state.posMax;
  }

  // No rule matched (or the nesting-cap path left `ok` unset): consume one
  // character so the caller always makes progress, then memoize the result.
  if (!ok) { state.pos++; }
  cache[pos] = state.pos;
};
+
+
// Generate tokens for the input range [state.pos, state.posMax).
// Characters no rule claims are accumulated into `state.pending` and
// flushed as a plain text token at the end.
//
ParserInline.prototype.tokenize = function (state) {
  var ok, i,
      rules = this.ruler.getRules(''),
      len = rules.length,
      end = state.posMax,
      maxNesting = state.md.options.maxNesting;

  while (state.pos < end) {
    // Try all possible rules.
    // On success, rule should:
    //
    // - update `state.pos`
    // - update `state.tokens`
    // - return true

    // Reset per iteration: without this, a stale `true` from an earlier
    // position survives into an iteration where `state.level >= maxNesting`
    // skips the rule loop entirely, and the `continue` below would spin
    // forever without advancing `state.pos`.
    ok = false;

    if (state.level < maxNesting) {
      for (i = 0; i < len; i++) {
        ok = rules[i](state, false);
        if (ok) { break; }
      }
    }

    if (ok) {
      if (state.pos >= end) { break; }
      continue;
    }

    // No rule matched: treat the character as plain text.
    state.pending += state.src[state.pos++];
  }

  // Flush any trailing plain text into a token.
  if (state.pending) {
    state.pushPending();
  }
};
+
+
/**
 * ParserInline.parse(str, md, env, outTokens)
 *
 * Process input string and push inline tokens into `outTokens`:
 * tokenizes `str`, then runs every second-chain (post-processing) rule
 * over the resulting state.
 **/
ParserInline.prototype.parse = function (str, md, env, outTokens) {
  var state = new this.State(str, md, env, outTokens);

  this.tokenize(state);

  var postRules = this.ruler2.getRules('');

  for (var i = 0, count = postRules.length; i < count; i++) {
    postRules[i](state);
  }
};
+
+
+ParserInline.prototype.State = require('./rules_inline/state_inline');
+
+
+module.exports = ParserInline;