import { CharFlags, CharTypes } from './charClassifier';
import { Chars } from '../chars';
import { Token } from '../token';
import { ParserState, Context } from '../common';
import { report, Errors } from '../errors';
import { advanceChar, LexerState, TokenLookup, scanSingleToken, scanNewLine, consumeLineFeed } from './';
import { decodeHTMLStrict } from './decodeHTML';
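
// JSX-specific scanning helpers: attribute values, strings, JSX text and identifiers.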

/**
 * Scans JSX attribute value
 *
 * @param parser The parser instance
 * @param context Context masks
 */
export function scanJSXAttributeValue(parser: ParserState, context: Context): Token {
  parser.startPos = parser.tokenPos = parser.index;
  parser.startColumn = parser.colPos = parser.column;
  parser.startLine = parser.linePos = parser.line;
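  // A quote character starts a JSX string attribute value; anything else
  // (for example '{' opening an expression container) is handed back to the
  // regular tokenizer.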
  parser.token =
    CharTypes[parser.currentChar] & CharFlags.StringLiteral
      ? scanJSXString(parser, context)
      : scanSingleToken(parser, context, LexerState.None);
  return parser.token;
}

/**
 * Scans JSX string
 *
 * @param parser The parser object
 * @param context Context masks
 */
export function scanJSXString(parser: ParserState, context: Context): Token {
  const quote = parser.currentChar;
  let char = advanceChar(parser);
  const start = parser.index;

  // Consume characters until the closing quote; bail out if the source ends first
  while (char !== quote) {
    if (parser.index >= parser.end) report(parser, Errors.UnterminatedString);
    char = advanceChar(parser);
  }

  // check for unterminated string
  if (char !== quote) report(parser, Errors.UnterminatedString);

  parser.tokenValue = parser.source.slice(start, parser.index);

  advanceChar(parser); // skip the quote

  // The raw slice (including the quotes) is only kept when raw output is requested
  if (context & Context.OptionsRaw) parser.tokenRaw = parser.source.slice(parser.tokenPos, parser.index);

  return Token.StringLiteral;
}

/**
 * Scans JSX token
 *
 * @param parser The parser object
 * @param context Context masks
 */
export function scanJSXToken(parser: ParserState, context: Context): Token {
  parser.startPos = parser.tokenPos = parser.index;
  parser.startColumn = parser.colPos = parser.column;
  parser.startLine = parser.linePos = parser.line;

  if (parser.index >= parser.end) return (parser.token = Token.EOF);

  const token = TokenLookup[parser.source.charCodeAt(parser.index)];

  switch (token) {
    // '<'
    case Token.LessThan: {
      advanceChar(parser);

      if (parser.currentChar === Chars.Slash) {
        advanceChar(parser);
        parser.token = Token.JSXClose;
      } else {
        parser.token = Token.LessThan;
      }
      break;
    }
    // '{'
    case Token.LeftBrace: {
      advanceChar(parser);
      parser.token = Token.LeftBrace;
      break;
    }
    default: {
      let state = LexerState.None;
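      // Anything else is JSXText: consume characters until the next character
      // that can begin a JSX token (such as '<' or '{'), keeping line and column
      // bookkeeping in sync across CR, LF and CRLF sequences.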
      while (parser.index < parser.end) {
        const type = CharTypes[parser.source.charCodeAt(parser.index)];

        if (type & CharFlags.CarriageReturn) {
          state |= LexerState.NewLine | LexerState.LastIsCR;
          scanNewLine(parser);
        } else if (type & CharFlags.LineFeed) {
          consumeLineFeed(parser, state);
          state = (state & ~LexerState.LastIsCR) | LexerState.NewLine;
        } else {
          advanceChar(parser);
        }

        if (CharTypes[parser.currentChar] & CharFlags.JSXToken) break;
      }

      const raw = parser.source.slice(parser.tokenPos, parser.index);
      if (context & Context.OptionsRaw) parser.tokenRaw = raw;
      // The token value has HTML entities decoded; the raw slice is left untouched
      parser.tokenValue = decodeHTMLStrict(raw);
      parser.token = Token.JSXText;
    }
  }

  return parser.token;
}

/**
 * Scans JSX identifier
 *
 * @param parser The parser instance
 */
export function scanJSXIdentifier(parser: ParserState): Token {
  if ((parser.token & Token.IsIdentifier) === Token.IsIdentifier) {
    const { index } = parser;
    let char = parser.currentChar;
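    // JSX names, unlike ECMAScript identifiers, may contain hyphens
    // (e.g. <foo-bar data-id="1" />), so keep consuming '-' as well as
    // identifier-part characters and append the extra text to the token value.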
    while (CharTypes[char] & (CharFlags.Hyphen | CharFlags.IdentifierPart)) {
      char = advanceChar(parser);
    }

    parser.tokenValue += parser.source.slice(index, parser.index);
  }
  parser.token = Token.Identifier;
  return parser.token;
}