Diffstat (limited to 'node_modules/sucrase/dist/esm/parser/plugins')
-rw-r--r--  node_modules/sucrase/dist/esm/parser/plugins/flow.js          1105
-rw-r--r--  node_modules/sucrase/dist/esm/parser/plugins/jsx/index.js      367
-rw-r--r--  node_modules/sucrase/dist/esm/parser/plugins/jsx/xhtml.js      256
-rw-r--r--  node_modules/sucrase/dist/esm/parser/plugins/types.js           37
-rw-r--r--  node_modules/sucrase/dist/esm/parser/plugins/typescript.js    1632
5 files changed, 3397 insertions, 0 deletions
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/flow.js b/node_modules/sucrase/dist/esm/parser/plugins/flow.js
new file mode 100644
index 0000000..66295d1
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/flow.js
@@ -0,0 +1,1105 @@
+/* eslint max-len: 0 */
+
+import {
+ eat,
+ lookaheadType,
+ lookaheadTypeAndKeyword,
+ match,
+ next,
+ popTypeContext,
+ pushTypeContext,
+
+} from "../tokenizer/index";
+import {ContextualKeyword} from "../tokenizer/keywords";
+import {TokenType, TokenType as tt} from "../tokenizer/types";
+import {input, state} from "../traverser/base";
+import {
+ baseParseMaybeAssign,
+ baseParseSubscript,
+ baseParseSubscripts,
+ parseArrow,
+ parseArrowExpression,
+ parseCallExpressionArguments,
+ parseExprAtom,
+ parseExpression,
+ parseFunctionBody,
+ parseIdentifier,
+ parseLiteral,
+
+} from "../traverser/expression";
+import {
+ baseParseExportStar,
+ parseExport,
+ parseExportFrom,
+ parseExportSpecifiers,
+ parseFunctionParams,
+ parseImport,
+ parseStatement,
+} from "../traverser/statement";
+import {
+ canInsertSemicolon,
+ eatContextual,
+ expect,
+ expectContextual,
+ isContextual,
+ isLookaheadContextual,
+ semicolon,
+ unexpected,
+} from "../traverser/util";
+
+function isMaybeDefaultImport(lookahead) {
+ return (
+ (lookahead.type === tt.name || !!(lookahead.type & TokenType.IS_KEYWORD)) &&
+ lookahead.contextualKeyword !== ContextualKeyword._from
+ );
+}
+
+function flowParseTypeInitialiser(tok) {
+ const oldIsType = pushTypeContext(0);
+ expect(tok || tt.colon);
+ flowParseType();
+ popTypeContext(oldIsType);
+}
+
+function flowParsePredicate() {
+ expect(tt.modulo);
+ expectContextual(ContextualKeyword._checks);
+ if (eat(tt.parenL)) {
+ parseExpression();
+ expect(tt.parenR);
+ }
+}
+
+function flowParseTypeAndPredicateInitialiser() {
+ const oldIsType = pushTypeContext(0);
+ expect(tt.colon);
+ if (match(tt.modulo)) {
+ flowParsePredicate();
+ } else {
+ flowParseType();
+ if (match(tt.modulo)) {
+ flowParsePredicate();
+ }
+ }
+ popTypeContext(oldIsType);
+}
+
+function flowParseDeclareClass() {
+ next();
+ flowParseInterfaceish(/* isClass */ true);
+}
+
+function flowParseDeclareFunction() {
+ next();
+ parseIdentifier();
+
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ expect(tt.parenL);
+ flowParseFunctionTypeParams();
+ expect(tt.parenR);
+
+ flowParseTypeAndPredicateInitialiser();
+
+ semicolon();
+}
+
+function flowParseDeclare() {
+ if (match(tt._class)) {
+ flowParseDeclareClass();
+ } else if (match(tt._function)) {
+ flowParseDeclareFunction();
+ } else if (match(tt._var)) {
+ flowParseDeclareVariable();
+ } else if (eatContextual(ContextualKeyword._module)) {
+ if (eat(tt.dot)) {
+ flowParseDeclareModuleExports();
+ } else {
+ flowParseDeclareModule();
+ }
+ } else if (isContextual(ContextualKeyword._type)) {
+ flowParseDeclareTypeAlias();
+ } else if (isContextual(ContextualKeyword._opaque)) {
+ flowParseDeclareOpaqueType();
+ } else if (isContextual(ContextualKeyword._interface)) {
+ flowParseDeclareInterface();
+ } else if (match(tt._export)) {
+ flowParseDeclareExportDeclaration();
+ } else {
+ unexpected();
+ }
+}
+
+function flowParseDeclareVariable() {
+ next();
+ flowParseTypeAnnotatableIdentifier();
+ semicolon();
+}
+
+function flowParseDeclareModule() {
+ if (match(tt.string)) {
+ parseExprAtom();
+ } else {
+ parseIdentifier();
+ }
+
+ expect(tt.braceL);
+ while (!match(tt.braceR) && !state.error) {
+ if (match(tt._import)) {
+ next();
+ parseImport();
+ } else {
+ unexpected();
+ }
+ }
+ expect(tt.braceR);
+}
+
+function flowParseDeclareExportDeclaration() {
+ expect(tt._export);
+
+ if (eat(tt._default)) {
+ if (match(tt._function) || match(tt._class)) {
+ // declare export default class ...
+ // declare export default function ...
+ flowParseDeclare();
+ } else {
+ // declare export default [type];
+ flowParseType();
+ semicolon();
+ }
+ } else if (
+ match(tt._var) || // declare export var ...
+ match(tt._function) || // declare export function ...
+ match(tt._class) || // declare export class ...
+ isContextual(ContextualKeyword._opaque) // declare export opaque ..
+ ) {
+ flowParseDeclare();
+ } else if (
+ match(tt.star) || // declare export * from ''
+ match(tt.braceL) || // declare export {} ...
+ isContextual(ContextualKeyword._interface) || // declare export interface ...
+ isContextual(ContextualKeyword._type) || // declare export type ...
+ isContextual(ContextualKeyword._opaque) // declare export opaque type ...
+ ) {
+ parseExport();
+ } else {
+ unexpected();
+ }
+}
+
+function flowParseDeclareModuleExports() {
+ expectContextual(ContextualKeyword._exports);
+ flowParseTypeAnnotation();
+ semicolon();
+}
+
+function flowParseDeclareTypeAlias() {
+ next();
+ flowParseTypeAlias();
+}
+
+function flowParseDeclareOpaqueType() {
+ next();
+ flowParseOpaqueType(true);
+}
+
+function flowParseDeclareInterface() {
+ next();
+ flowParseInterfaceish();
+}
+
+// Interfaces
+
+function flowParseInterfaceish(isClass = false) {
+ flowParseRestrictedIdentifier();
+
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ if (eat(tt._extends)) {
+ do {
+ flowParseInterfaceExtends();
+ } while (!isClass && eat(tt.comma));
+ }
+
+ if (isContextual(ContextualKeyword._mixins)) {
+ next();
+ do {
+ flowParseInterfaceExtends();
+ } while (eat(tt.comma));
+ }
+
+ if (isContextual(ContextualKeyword._implements)) {
+ next();
+ do {
+ flowParseInterfaceExtends();
+ } while (eat(tt.comma));
+ }
+
+ flowParseObjectType(isClass, false, isClass);
+}
+
+function flowParseInterfaceExtends() {
+ flowParseQualifiedTypeIdentifier(false);
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterInstantiation();
+ }
+}
+
+function flowParseInterface() {
+ flowParseInterfaceish();
+}
+
+function flowParseRestrictedIdentifier() {
+ parseIdentifier();
+}
+
+function flowParseTypeAlias() {
+ flowParseRestrictedIdentifier();
+
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ flowParseTypeInitialiser(tt.eq);
+ semicolon();
+}
+
+function flowParseOpaqueType(declare) {
+ expectContextual(ContextualKeyword._type);
+ flowParseRestrictedIdentifier();
+
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ // Parse the supertype
+ if (match(tt.colon)) {
+ flowParseTypeInitialiser(tt.colon);
+ }
+
+ if (!declare) {
+ flowParseTypeInitialiser(tt.eq);
+ }
+ semicolon();
+}
+
+function flowParseTypeParameter() {
+ flowParseVariance();
+ flowParseTypeAnnotatableIdentifier();
+
+ if (eat(tt.eq)) {
+ flowParseType();
+ }
+}
+
+export function flowParseTypeParameterDeclaration() {
+ const oldIsType = pushTypeContext(0);
+ // istanbul ignore else: this condition is already checked at all call sites
+ if (match(tt.lessThan) || match(tt.typeParameterStart)) {
+ next();
+ } else {
+ unexpected();
+ }
+
+ do {
+ flowParseTypeParameter();
+ if (!match(tt.greaterThan)) {
+ expect(tt.comma);
+ }
+ } while (!match(tt.greaterThan) && !state.error);
+ expect(tt.greaterThan);
+ popTypeContext(oldIsType);
+}
+
+function flowParseTypeParameterInstantiation() {
+ const oldIsType = pushTypeContext(0);
+ expect(tt.lessThan);
+ while (!match(tt.greaterThan) && !state.error) {
+ flowParseType();
+ if (!match(tt.greaterThan)) {
+ expect(tt.comma);
+ }
+ }
+ expect(tt.greaterThan);
+ popTypeContext(oldIsType);
+}
+
+function flowParseInterfaceType() {
+ expectContextual(ContextualKeyword._interface);
+ if (eat(tt._extends)) {
+ do {
+ flowParseInterfaceExtends();
+ } while (eat(tt.comma));
+ }
+ flowParseObjectType(false, false, false);
+}
+
+function flowParseObjectPropertyKey() {
+ if (match(tt.num) || match(tt.string)) {
+ parseExprAtom();
+ } else {
+ parseIdentifier();
+ }
+}
+
+function flowParseObjectTypeIndexer() {
+ // Note: bracketL has already been consumed
+ if (lookaheadType() === tt.colon) {
+ flowParseObjectPropertyKey();
+ flowParseTypeInitialiser();
+ } else {
+ flowParseType();
+ }
+ expect(tt.bracketR);
+ flowParseTypeInitialiser();
+}
+
+function flowParseObjectTypeInternalSlot() {
+ // Note: both bracketL have already been consumed
+ flowParseObjectPropertyKey();
+ expect(tt.bracketR);
+ expect(tt.bracketR);
+ if (match(tt.lessThan) || match(tt.parenL)) {
+ flowParseObjectTypeMethodish();
+ } else {
+ eat(tt.question);
+ flowParseTypeInitialiser();
+ }
+}
+
+function flowParseObjectTypeMethodish() {
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ expect(tt.parenL);
+ while (!match(tt.parenR) && !match(tt.ellipsis) && !state.error) {
+ flowParseFunctionTypeParam();
+ if (!match(tt.parenR)) {
+ expect(tt.comma);
+ }
+ }
+
+ if (eat(tt.ellipsis)) {
+ flowParseFunctionTypeParam();
+ }
+ expect(tt.parenR);
+ flowParseTypeInitialiser();
+}
+
+function flowParseObjectTypeCallProperty() {
+ flowParseObjectTypeMethodish();
+}
+
+function flowParseObjectType(allowStatic, allowExact, allowProto) {
+ let endDelim;
+ if (allowExact && match(tt.braceBarL)) {
+ expect(tt.braceBarL);
+ endDelim = tt.braceBarR;
+ } else {
+ expect(tt.braceL);
+ endDelim = tt.braceR;
+ }
+
+ while (!match(endDelim) && !state.error) {
+ if (allowProto && isContextual(ContextualKeyword._proto)) {
+ const lookahead = lookaheadType();
+ if (lookahead !== tt.colon && lookahead !== tt.question) {
+ next();
+ allowStatic = false;
+ }
+ }
+ if (allowStatic && isContextual(ContextualKeyword._static)) {
+ const lookahead = lookaheadType();
+ if (lookahead !== tt.colon && lookahead !== tt.question) {
+ next();
+ }
+ }
+
+ flowParseVariance();
+
+ if (eat(tt.bracketL)) {
+ if (eat(tt.bracketL)) {
+ flowParseObjectTypeInternalSlot();
+ } else {
+ flowParseObjectTypeIndexer();
+ }
+ } else if (match(tt.parenL) || match(tt.lessThan)) {
+ flowParseObjectTypeCallProperty();
+ } else {
+ if (isContextual(ContextualKeyword._get) || isContextual(ContextualKeyword._set)) {
+ const lookahead = lookaheadType();
+ if (lookahead === tt.name || lookahead === tt.string || lookahead === tt.num) {
+ next();
+ }
+ }
+
+ flowParseObjectTypeProperty();
+ }
+
+ flowObjectTypeSemicolon();
+ }
+
+ expect(endDelim);
+}
+
+function flowParseObjectTypeProperty() {
+ if (match(tt.ellipsis)) {
+ expect(tt.ellipsis);
+ if (!eat(tt.comma)) {
+ eat(tt.semi);
+ }
+ // Explicit inexact object syntax.
+ if (match(tt.braceR)) {
+ return;
+ }
+ flowParseType();
+ } else {
+ flowParseObjectPropertyKey();
+ if (match(tt.lessThan) || match(tt.parenL)) {
+ // This is a method property
+ flowParseObjectTypeMethodish();
+ } else {
+ eat(tt.question);
+ flowParseTypeInitialiser();
+ }
+ }
+}
+
+function flowObjectTypeSemicolon() {
+ if (!eat(tt.semi) && !eat(tt.comma) && !match(tt.braceR) && !match(tt.braceBarR)) {
+ unexpected();
+ }
+}
+
+function flowParseQualifiedTypeIdentifier(initialIdAlreadyParsed) {
+ if (!initialIdAlreadyParsed) {
+ parseIdentifier();
+ }
+ while (eat(tt.dot)) {
+ parseIdentifier();
+ }
+}
+
+function flowParseGenericType() {
+ flowParseQualifiedTypeIdentifier(true);
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterInstantiation();
+ }
+}
+
+function flowParseTypeofType() {
+ expect(tt._typeof);
+ flowParsePrimaryType();
+}
+
+function flowParseTupleType() {
+ expect(tt.bracketL);
+ // We allow trailing commas
+ while (state.pos < input.length && !match(tt.bracketR)) {
+ flowParseType();
+ if (match(tt.bracketR)) {
+ break;
+ }
+ expect(tt.comma);
+ }
+ expect(tt.bracketR);
+}
+
+function flowParseFunctionTypeParam() {
+ const lookahead = lookaheadType();
+ if (lookahead === tt.colon || lookahead === tt.question) {
+ parseIdentifier();
+ eat(tt.question);
+ flowParseTypeInitialiser();
+ } else {
+ flowParseType();
+ }
+}
+
+function flowParseFunctionTypeParams() {
+ while (!match(tt.parenR) && !match(tt.ellipsis) && !state.error) {
+ flowParseFunctionTypeParam();
+ if (!match(tt.parenR)) {
+ expect(tt.comma);
+ }
+ }
+ if (eat(tt.ellipsis)) {
+ flowParseFunctionTypeParam();
+ }
+}
+
+// The parsing of types roughly parallels the parsing of expressions, and
+// primary types are kind of like primary expressions...they're the
+// primitives with which other types are constructed.
+function flowParsePrimaryType() {
+ let isGroupedType = false;
+ const oldNoAnonFunctionType = state.noAnonFunctionType;
+
+ switch (state.type) {
+ case tt.name: {
+ if (isContextual(ContextualKeyword._interface)) {
+ flowParseInterfaceType();
+ return;
+ }
+ parseIdentifier();
+ flowParseGenericType();
+ return;
+ }
+
+ case tt.braceL:
+ flowParseObjectType(false, false, false);
+ return;
+
+ case tt.braceBarL:
+ flowParseObjectType(false, true, false);
+ return;
+
+ case tt.bracketL:
+ flowParseTupleType();
+ return;
+
+ case tt.lessThan:
+ flowParseTypeParameterDeclaration();
+ expect(tt.parenL);
+ flowParseFunctionTypeParams();
+ expect(tt.parenR);
+ expect(tt.arrow);
+ flowParseType();
+ return;
+
+ case tt.parenL:
+ next();
+
+ // Check to see if this is actually a grouped type
+ if (!match(tt.parenR) && !match(tt.ellipsis)) {
+ if (match(tt.name)) {
+ const token = lookaheadType();
+ isGroupedType = token !== tt.question && token !== tt.colon;
+ } else {
+ isGroupedType = true;
+ }
+ }
+
+ if (isGroupedType) {
+ state.noAnonFunctionType = false;
+ flowParseType();
+ state.noAnonFunctionType = oldNoAnonFunctionType;
+
+ // A `,` or a `) =>` means this is an anonymous function type
+ if (
+ state.noAnonFunctionType ||
+ !(match(tt.comma) || (match(tt.parenR) && lookaheadType() === tt.arrow))
+ ) {
+ expect(tt.parenR);
+ return;
+ } else {
+ // Eat a comma if there is one
+ eat(tt.comma);
+ }
+ }
+
+ flowParseFunctionTypeParams();
+
+ expect(tt.parenR);
+ expect(tt.arrow);
+ flowParseType();
+ return;
+
+ case tt.minus:
+ next();
+ parseLiteral();
+ return;
+
+ case tt.string:
+ case tt.num:
+ case tt._true:
+ case tt._false:
+ case tt._null:
+ case tt._this:
+ case tt._void:
+ case tt.star:
+ next();
+ return;
+
+ default:
+ if (state.type === tt._typeof) {
+ flowParseTypeofType();
+ return;
+ } else if (state.type & TokenType.IS_KEYWORD) {
+ next();
+ state.tokens[state.tokens.length - 1].type = tt.name;
+ return;
+ }
+ }
+
+ unexpected();
+}
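+// As an illustration of the tt.parenL case above (example inputs, not from the
+// original file): `(number)` is a grouped (parenthesized) type, since the token
+// after `number` is neither `?` nor `:` and no `,` or `) =>` follows;
+// `(x: number) => string` is parsed as a function type because the lookahead
+// after `x` is `:`; and `(number, string) => void` starts out as a grouped type,
+// but the `,` reveals an anonymous function type with unnamed parameters, so we
+// eat the comma and keep parsing parameters.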
+
+function flowParsePostfixType() {
+ flowParsePrimaryType();
+ while (!canInsertSemicolon() && (match(tt.bracketL) || match(tt.questionDot))) {
+ eat(tt.questionDot);
+ expect(tt.bracketL);
+ if (eat(tt.bracketR)) {
+ // Array type
+ } else {
+ // Indexed access type
+ flowParseType();
+ expect(tt.bracketR);
+ }
+ }
+}
+
+function flowParsePrefixType() {
+ if (eat(tt.question)) {
+ flowParsePrefixType();
+ } else {
+ flowParsePostfixType();
+ }
+}
+
+function flowParseAnonFunctionWithoutParens() {
+ flowParsePrefixType();
+ if (!state.noAnonFunctionType && eat(tt.arrow)) {
+ flowParseType();
+ }
+}
+
+function flowParseIntersectionType() {
+ eat(tt.bitwiseAND);
+ flowParseAnonFunctionWithoutParens();
+ while (eat(tt.bitwiseAND)) {
+ flowParseAnonFunctionWithoutParens();
+ }
+}
+
+function flowParseUnionType() {
+ eat(tt.bitwiseOR);
+ flowParseIntersectionType();
+ while (eat(tt.bitwiseOR)) {
+ flowParseIntersectionType();
+ }
+}
+
+function flowParseType() {
+ flowParseUnionType();
+}
+
+export function flowParseTypeAnnotation() {
+ flowParseTypeInitialiser();
+}
+
+function flowParseTypeAnnotatableIdentifier() {
+ parseIdentifier();
+ if (match(tt.colon)) {
+ flowParseTypeAnnotation();
+ }
+}
+
+export function flowParseVariance() {
+ if (match(tt.plus) || match(tt.minus)) {
+ next();
+ state.tokens[state.tokens.length - 1].isType = true;
+ }
+}
+
+// ==================================
+// Overrides
+// ==================================
+
+export function flowParseFunctionBodyAndFinish(funcContextId) {
+ // For arrow functions, `parseArrow` handles the return type itself.
+ if (match(tt.colon)) {
+ flowParseTypeAndPredicateInitialiser();
+ }
+
+ parseFunctionBody(false, funcContextId);
+}
+
+export function flowParseSubscript(
+ startTokenIndex,
+ noCalls,
+ stopState,
+) {
+ if (match(tt.questionDot) && lookaheadType() === tt.lessThan) {
+ if (noCalls) {
+ stopState.stop = true;
+ return;
+ }
+ next();
+ flowParseTypeParameterInstantiation();
+ expect(tt.parenL);
+ parseCallExpressionArguments();
+ return;
+ } else if (!noCalls && match(tt.lessThan)) {
+ const snapshot = state.snapshot();
+ flowParseTypeParameterInstantiation();
+ expect(tt.parenL);
+ parseCallExpressionArguments();
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ } else {
+ return;
+ }
+ }
+ baseParseSubscript(startTokenIndex, noCalls, stopState);
+}
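+// Example inputs for the branches above (illustrative): `f?.<string>(x)` is an
+// optional call with explicit type arguments and takes the first branch, while
+// `f<string>(x)` takes the speculative second branch; if that speculative parse
+// fails (e.g. the source was really the comparisons `a < b > (c)`), the snapshot
+// is restored and we fall back to baseParseSubscript.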
+
+export function flowStartParseNewArguments() {
+ if (match(tt.lessThan)) {
+ const snapshot = state.snapshot();
+ flowParseTypeParameterInstantiation();
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ }
+ }
+}
+
+// interfaces
+export function flowTryParseStatement() {
+ if (match(tt.name) && state.contextualKeyword === ContextualKeyword._interface) {
+ const oldIsType = pushTypeContext(0);
+ next();
+ flowParseInterface();
+ popTypeContext(oldIsType);
+ return true;
+ } else if (isContextual(ContextualKeyword._enum)) {
+ flowParseEnumDeclaration();
+ return true;
+ }
+ return false;
+}
+
+export function flowTryParseExportDefaultExpression() {
+ if (isContextual(ContextualKeyword._enum)) {
+ flowParseEnumDeclaration();
+ return true;
+ }
+ return false;
+}
+
+// declares, interfaces and type aliases
+export function flowParseIdentifierStatement(contextualKeyword) {
+ if (contextualKeyword === ContextualKeyword._declare) {
+ if (
+ match(tt._class) ||
+ match(tt.name) ||
+ match(tt._function) ||
+ match(tt._var) ||
+ match(tt._export)
+ ) {
+ const oldIsType = pushTypeContext(1);
+ flowParseDeclare();
+ popTypeContext(oldIsType);
+ }
+ } else if (match(tt.name)) {
+ if (contextualKeyword === ContextualKeyword._interface) {
+ const oldIsType = pushTypeContext(1);
+ flowParseInterface();
+ popTypeContext(oldIsType);
+ } else if (contextualKeyword === ContextualKeyword._type) {
+ const oldIsType = pushTypeContext(1);
+ flowParseTypeAlias();
+ popTypeContext(oldIsType);
+ } else if (contextualKeyword === ContextualKeyword._opaque) {
+ const oldIsType = pushTypeContext(1);
+ flowParseOpaqueType(false);
+ popTypeContext(oldIsType);
+ }
+ }
+ semicolon();
+}
+
+// export type
+export function flowShouldParseExportDeclaration() {
+ return (
+ isContextual(ContextualKeyword._type) ||
+ isContextual(ContextualKeyword._interface) ||
+ isContextual(ContextualKeyword._opaque) ||
+ isContextual(ContextualKeyword._enum)
+ );
+}
+
+export function flowShouldDisallowExportDefaultSpecifier() {
+ return (
+ match(tt.name) &&
+ (state.contextualKeyword === ContextualKeyword._type ||
+ state.contextualKeyword === ContextualKeyword._interface ||
+ state.contextualKeyword === ContextualKeyword._opaque ||
+ state.contextualKeyword === ContextualKeyword._enum)
+ );
+}
+
+export function flowParseExportDeclaration() {
+ if (isContextual(ContextualKeyword._type)) {
+ const oldIsType = pushTypeContext(1);
+ next();
+
+ if (match(tt.braceL)) {
+ // export type { foo, bar };
+ parseExportSpecifiers();
+ parseExportFrom();
+ } else {
+ // export type Foo = Bar;
+ flowParseTypeAlias();
+ }
+ popTypeContext(oldIsType);
+ } else if (isContextual(ContextualKeyword._opaque)) {
+ const oldIsType = pushTypeContext(1);
+ next();
+ // export opaque type Foo = Bar;
+ flowParseOpaqueType(false);
+ popTypeContext(oldIsType);
+ } else if (isContextual(ContextualKeyword._interface)) {
+ const oldIsType = pushTypeContext(1);
+ next();
+ flowParseInterface();
+ popTypeContext(oldIsType);
+ } else {
+ parseStatement(true);
+ }
+}
+
+export function flowShouldParseExportStar() {
+ return match(tt.star) || (isContextual(ContextualKeyword._type) && lookaheadType() === tt.star);
+}
+
+export function flowParseExportStar() {
+ if (eatContextual(ContextualKeyword._type)) {
+ const oldIsType = pushTypeContext(2);
+ baseParseExportStar();
+ popTypeContext(oldIsType);
+ } else {
+ baseParseExportStar();
+ }
+}
+
+// parse the super class type parameters and implements
+export function flowAfterParseClassSuper(hasSuper) {
+ if (hasSuper && match(tt.lessThan)) {
+ flowParseTypeParameterInstantiation();
+ }
+ if (isContextual(ContextualKeyword._implements)) {
+ const oldIsType = pushTypeContext(0);
+ next();
+ state.tokens[state.tokens.length - 1].type = tt._implements;
+ do {
+ flowParseRestrictedIdentifier();
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterInstantiation();
+ }
+ } while (eat(tt.comma));
+ popTypeContext(oldIsType);
+ }
+}
+
+// parse type parameters for object method shorthand
+export function flowStartParseObjPropValue() {
+ // method shorthand
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ if (!match(tt.parenL)) unexpected();
+ }
+}
+
+export function flowParseAssignableListItemTypes() {
+ const oldIsType = pushTypeContext(0);
+ eat(tt.question);
+ if (match(tt.colon)) {
+ flowParseTypeAnnotation();
+ }
+ popTypeContext(oldIsType);
+}
+
+// parse typeof and type imports
+export function flowStartParseImportSpecifiers() {
+ if (match(tt._typeof) || isContextual(ContextualKeyword._type)) {
+ const lh = lookaheadTypeAndKeyword();
+ if (isMaybeDefaultImport(lh) || lh.type === tt.braceL || lh.type === tt.star) {
+ next();
+ }
+ }
+}
+
+// parse import-type/typeof shorthand
+export function flowParseImportSpecifier() {
+ const isTypeKeyword =
+ state.contextualKeyword === ContextualKeyword._type || state.type === tt._typeof;
+ if (isTypeKeyword) {
+ next();
+ } else {
+ parseIdentifier();
+ }
+
+ if (isContextual(ContextualKeyword._as) && !isLookaheadContextual(ContextualKeyword._as)) {
+ parseIdentifier();
+ if (isTypeKeyword && !match(tt.name) && !(state.type & TokenType.IS_KEYWORD)) {
+ // `import {type as ,` or `import {type as }`
+ } else {
+ // `import {type as foo`
+ parseIdentifier();
+ }
+ } else {
+ if (isTypeKeyword && (match(tt.name) || !!(state.type & TokenType.IS_KEYWORD))) {
+ // `import {type foo`
+ parseIdentifier();
+ }
+ if (eatContextual(ContextualKeyword._as)) {
+ parseIdentifier();
+ }
+ }
+}
+
+// parse function type parameters - function foo<T>() {}
+export function flowStartParseFunctionParams() {
+ // Originally this checked if the method is a getter/setter, but if it was, we'd crash soon
+ // anyway, so don't try to propagate that information.
+ if (match(tt.lessThan)) {
+ const oldIsType = pushTypeContext(0);
+ flowParseTypeParameterDeclaration();
+ popTypeContext(oldIsType);
+ }
+}
+
+// parse flow type annotations on variable declarator heads - let foo: string = bar
+export function flowAfterParseVarHead() {
+ if (match(tt.colon)) {
+ flowParseTypeAnnotation();
+ }
+}
+
+// parse the return type of an async arrow function - let foo = (async (): number => {});
+export function flowStartParseAsyncArrowFromCallExpression() {
+ if (match(tt.colon)) {
+ const oldNoAnonFunctionType = state.noAnonFunctionType;
+ state.noAnonFunctionType = true;
+ flowParseTypeAnnotation();
+ state.noAnonFunctionType = oldNoAnonFunctionType;
+ }
+}
+
+// We need to support type parameter declarations for arrow functions. This
+// is tricky. There are three situations we need to handle
+//
+// 1. This is either JSX or an arrow function. We'll try JSX first. If that
+// fails, we'll try an arrow function. If that fails, we'll throw the JSX
+// error.
+// 2. This is an arrow function. We'll parse the type parameter declaration,
+// parse the rest, make sure the rest is an arrow function, and go from
+// there
+// 3. This is neither. Just call the super method
+export function flowParseMaybeAssign(noIn, isWithinParens) {
+ if (match(tt.lessThan)) {
+ const snapshot = state.snapshot();
+ let wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ state.type = tt.typeParameterStart;
+ } else {
+ return wasArrow;
+ }
+
+ const oldIsType = pushTypeContext(0);
+ flowParseTypeParameterDeclaration();
+ popTypeContext(oldIsType);
+ wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (wasArrow) {
+ return true;
+ }
+ unexpected();
+ }
+
+ return baseParseMaybeAssign(noIn, isWithinParens);
+}
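+// For example (illustrative): in `const f = <T>(x: T): T => x;` the leading
+// `<T>` is first tried as JSX by baseParseMaybeAssign (situation 1); when that
+// parse errors, the snapshot is restored, the `<` is re-tagged as
+// typeParameterStart, the type parameters are parsed, and the remainder must
+// then be an arrow function (situation 2).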
+
+// handle return types for arrow functions
+export function flowParseArrow() {
+ if (match(tt.colon)) {
+ const oldIsType = pushTypeContext(0);
+ const snapshot = state.snapshot();
+
+ const oldNoAnonFunctionType = state.noAnonFunctionType;
+ state.noAnonFunctionType = true;
+ flowParseTypeAndPredicateInitialiser();
+ state.noAnonFunctionType = oldNoAnonFunctionType;
+
+ if (canInsertSemicolon()) unexpected();
+ if (!match(tt.arrow)) unexpected();
+
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ }
+ popTypeContext(oldIsType);
+ }
+ return eat(tt.arrow);
+}
+
+export function flowParseSubscripts(startTokenIndex, noCalls = false) {
+ if (
+ state.tokens[state.tokens.length - 1].contextualKeyword === ContextualKeyword._async &&
+ match(tt.lessThan)
+ ) {
+ const snapshot = state.snapshot();
+ const wasArrow = parseAsyncArrowWithTypeParameters();
+ if (wasArrow && !state.error) {
+ return;
+ }
+ state.restoreFromSnapshot(snapshot);
+ }
+
+ baseParseSubscripts(startTokenIndex, noCalls);
+}
+
+// Returns true if there was an arrow function here.
+function parseAsyncArrowWithTypeParameters() {
+ state.scopeDepth++;
+ const startTokenIndex = state.tokens.length;
+ parseFunctionParams();
+ if (!parseArrow()) {
+ return false;
+ }
+ parseArrowExpression(startTokenIndex);
+ return true;
+}
+
+function flowParseEnumDeclaration() {
+ expectContextual(ContextualKeyword._enum);
+ state.tokens[state.tokens.length - 1].type = tt._enum;
+ parseIdentifier();
+ flowParseEnumBody();
+}
+
+function flowParseEnumBody() {
+ if (eatContextual(ContextualKeyword._of)) {
+ next();
+ }
+ expect(tt.braceL);
+ flowParseEnumMembers();
+ expect(tt.braceR);
+}
+
+function flowParseEnumMembers() {
+ while (!match(tt.braceR) && !state.error) {
+ if (eat(tt.ellipsis)) {
+ break;
+ }
+ flowParseEnumMember();
+ if (!match(tt.braceR)) {
+ expect(tt.comma);
+ }
+ }
+}
+
+function flowParseEnumMember() {
+ parseIdentifier();
+ if (eat(tt.eq)) {
+ // Flow enum values are always just one token (a string, number, or boolean literal).
+ next();
+ }
+}
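+// Illustrative Flow enum inputs accepted by the functions above (names are
+// arbitrary examples):
+//   enum Status { Active = 'active', Inactive = 'inactive' }
+//   enum E of number { A = 1, B = 2, ... }
+// An optional `of <representation type>` follows the enum name, each member is
+// an identifier optionally followed by `=` and a single literal token, and a
+// trailing `...` ends the member list early.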
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/jsx/index.js b/node_modules/sucrase/dist/esm/parser/plugins/jsx/index.js
new file mode 100644
index 0000000..83f3983
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/jsx/index.js
@@ -0,0 +1,367 @@
+import {
+ eat,
+ finishToken,
+ getTokenFromCode,
+ IdentifierRole,
+ JSXRole,
+ match,
+ next,
+ skipSpace,
+ Token,
+} from "../../tokenizer/index";
+import {TokenType as tt} from "../../tokenizer/types";
+import {input, isTypeScriptEnabled, state} from "../../traverser/base";
+import {parseExpression, parseMaybeAssign} from "../../traverser/expression";
+import {expect, unexpected} from "../../traverser/util";
+import {charCodes} from "../../util/charcodes";
+import {IS_IDENTIFIER_CHAR, IS_IDENTIFIER_START} from "../../util/identifier";
+import {tsTryParseJSXTypeArgument} from "../typescript";
+
+/**
+ * Read token with JSX contents.
+ *
+ * In addition to detecting jsxTagStart and also regular tokens that might be
+ * part of an expression, this code detects the start and end of text ranges
+ * within JSX children. In order to properly count the number of children, we
+ * distinguish jsxText from jsxEmptyText, which is a text range that simplifies
+ * to the empty string after JSX whitespace trimming.
+ *
+ * It turns out that a JSX text range will simplify to the empty string if and
+ * only if both of these conditions hold:
+ * - The range consists entirely of whitespace characters (only counting space,
+ * tab, \r, and \n).
+ * - The range has at least one newline.
+ * This can be proven by analyzing any implementation of whitespace trimming,
+ * e.g. formatJSXTextLiteral in Sucrase or cleanJSXElementLiteralChild in Babel.
+ */
+function jsxReadToken() {
+ let sawNewline = false;
+ let sawNonWhitespace = false;
+ while (true) {
+ if (state.pos >= input.length) {
+ unexpected("Unterminated JSX contents");
+ return;
+ }
+
+ const ch = input.charCodeAt(state.pos);
+ if (ch === charCodes.lessThan || ch === charCodes.leftCurlyBrace) {
+ if (state.pos === state.start) {
+ if (ch === charCodes.lessThan) {
+ state.pos++;
+ finishToken(tt.jsxTagStart);
+ return;
+ }
+ getTokenFromCode(ch);
+ return;
+ }
+ if (sawNewline && !sawNonWhitespace) {
+ finishToken(tt.jsxEmptyText);
+ } else {
+ finishToken(tt.jsxText);
+ }
+ return;
+ }
+
+ // This is part of JSX text.
+ if (ch === charCodes.lineFeed) {
+ sawNewline = true;
+ } else if (ch !== charCodes.space && ch !== charCodes.carriageReturn && ch !== charCodes.tab) {
+ sawNonWhitespace = true;
+ }
+ state.pos++;
+ }
+}
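+// As an example of the jsxText/jsxEmptyText distinction above (illustrative):
+// in
+//   <div>
+//     {x}
+//   </div>
+// the text ranges around {x} are whitespace-only and contain a newline, so they
+// lex as jsxEmptyText, while the single spaces in <b> {x} </b> contain no
+// newline, survive JSX whitespace trimming, and lex as jsxText.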
+
+function jsxReadString(quote) {
+ state.pos++;
+ for (;;) {
+ if (state.pos >= input.length) {
+ unexpected("Unterminated string constant");
+ return;
+ }
+
+ const ch = input.charCodeAt(state.pos);
+ if (ch === quote) {
+ state.pos++;
+ break;
+ }
+ state.pos++;
+ }
+ finishToken(tt.string);
+}
+
+// Read a JSX identifier (valid tag or attribute name).
+//
+// Optimized version since JSX identifiers can't contain
+// escape characters and so can be read as a single slice.
+// Also assumes that first character was already checked
+// by isIdentifierStart in readToken.
+
+function jsxReadWord() {
+ let ch;
+ do {
+ if (state.pos > input.length) {
+ unexpected("Unexpectedly reached the end of input.");
+ return;
+ }
+ ch = input.charCodeAt(++state.pos);
+ } while (IS_IDENTIFIER_CHAR[ch] || ch === charCodes.dash);
+ finishToken(tt.jsxName);
+}
+
+// Parse next token as JSX identifier
+function jsxParseIdentifier() {
+ nextJSXTagToken();
+}
+
+// Parse namespaced identifier.
+function jsxParseNamespacedName(identifierRole) {
+ jsxParseIdentifier();
+ if (!eat(tt.colon)) {
+ // Plain identifier, so this is an access.
+ state.tokens[state.tokens.length - 1].identifierRole = identifierRole;
+ return;
+ }
+ // Process the second half of the namespaced name.
+ jsxParseIdentifier();
+}
+
+// Parses element name in any form - namespaced, member
+// or single identifier.
+function jsxParseElementName() {
+ const firstTokenIndex = state.tokens.length;
+ jsxParseNamespacedName(IdentifierRole.Access);
+ let hadDot = false;
+ while (match(tt.dot)) {
+ hadDot = true;
+ nextJSXTagToken();
+ jsxParseIdentifier();
+ }
+ // For tags like <div> with a lowercase letter and no dots, the name is
+ // actually *not* an identifier access, since it's referring to a built-in
+ // tag name. Remove the identifier role in this case so that it's not
+ // accidentally transformed by the imports transform when preserving JSX.
+ if (!hadDot) {
+ const firstToken = state.tokens[firstTokenIndex];
+ const firstChar = input.charCodeAt(firstToken.start);
+ if (firstChar >= charCodes.lowercaseA && firstChar <= charCodes.lowercaseZ) {
+ firstToken.identifierRole = null;
+ }
+ }
+}
+
+// Parses any type of JSX attribute value.
+function jsxParseAttributeValue() {
+ switch (state.type) {
+ case tt.braceL:
+ next();
+ parseExpression();
+ nextJSXTagToken();
+ return;
+
+ case tt.jsxTagStart:
+ jsxParseElement();
+ nextJSXTagToken();
+ return;
+
+ case tt.string:
+ nextJSXTagToken();
+ return;
+
+ default:
+ unexpected("JSX value should be either an expression or a quoted JSX text");
+ }
+}
+
+// Parse JSX spread child, after already processing the {
+// Does not parse the closing }
+function jsxParseSpreadChild() {
+ expect(tt.ellipsis);
+ parseExpression();
+}
+
+// Parses JSX opening tag starting after "<".
+// Returns true if the tag was self-closing.
+// Does not parse the last token.
+function jsxParseOpeningElement(initialTokenIndex) {
+ if (match(tt.jsxTagEnd)) {
+ // This is an open-fragment.
+ return false;
+ }
+ jsxParseElementName();
+ if (isTypeScriptEnabled) {
+ tsTryParseJSXTypeArgument();
+ }
+ let hasSeenPropSpread = false;
+ while (!match(tt.slash) && !match(tt.jsxTagEnd) && !state.error) {
+ if (eat(tt.braceL)) {
+ hasSeenPropSpread = true;
+ expect(tt.ellipsis);
+ parseMaybeAssign();
+ // }
+ nextJSXTagToken();
+ continue;
+ }
+ if (
+ hasSeenPropSpread &&
+ state.end - state.start === 3 &&
+ input.charCodeAt(state.start) === charCodes.lowercaseK &&
+ input.charCodeAt(state.start + 1) === charCodes.lowercaseE &&
+ input.charCodeAt(state.start + 2) === charCodes.lowercaseY
+ ) {
+ state.tokens[initialTokenIndex].jsxRole = JSXRole.KeyAfterPropSpread;
+ }
+ jsxParseNamespacedName(IdentifierRole.ObjectKey);
+ if (match(tt.eq)) {
+ nextJSXTagToken();
+ jsxParseAttributeValue();
+ }
+ }
+ const isSelfClosing = match(tt.slash);
+ if (isSelfClosing) {
+ // /
+ nextJSXTagToken();
+ }
+ return isSelfClosing;
+}
+
+// Parses JSX closing tag starting after "</".
+// Does not parse the last token.
+function jsxParseClosingElement() {
+ if (match(tt.jsxTagEnd)) {
+ // Fragment syntax, so we immediately have a tag end.
+ return;
+ }
+ jsxParseElementName();
+}
+
+// Parses entire JSX element, including its opening tag
+// (starting after "<"), attributes, contents and closing tag.
+// Does not parse the last token.
+function jsxParseElementAt() {
+ const initialTokenIndex = state.tokens.length - 1;
+ state.tokens[initialTokenIndex].jsxRole = JSXRole.NoChildren;
+ let numExplicitChildren = 0;
+ const isSelfClosing = jsxParseOpeningElement(initialTokenIndex);
+ if (!isSelfClosing) {
+ nextJSXExprToken();
+ while (true) {
+ switch (state.type) {
+ case tt.jsxTagStart:
+ nextJSXTagToken();
+ if (match(tt.slash)) {
+ nextJSXTagToken();
+ jsxParseClosingElement();
+ // Key after prop spread takes precedence over number of children,
+ // since it means we switch to createElement, which doesn't care
+ // about number of children.
+ if (state.tokens[initialTokenIndex].jsxRole !== JSXRole.KeyAfterPropSpread) {
+ if (numExplicitChildren === 1) {
+ state.tokens[initialTokenIndex].jsxRole = JSXRole.OneChild;
+ } else if (numExplicitChildren > 1) {
+ state.tokens[initialTokenIndex].jsxRole = JSXRole.StaticChildren;
+ }
+ }
+ return;
+ }
+ numExplicitChildren++;
+ jsxParseElementAt();
+ nextJSXExprToken();
+ break;
+
+ case tt.jsxText:
+ numExplicitChildren++;
+ nextJSXExprToken();
+ break;
+
+ case tt.jsxEmptyText:
+ nextJSXExprToken();
+ break;
+
+ case tt.braceL:
+ next();
+ if (match(tt.ellipsis)) {
+ jsxParseSpreadChild();
+ nextJSXExprToken();
+ // Spread children are a mechanism to explicitly mark children as
+ // static, so count it as 2 children to satisfy the "more than one
+ // child" condition.
+ numExplicitChildren += 2;
+ } else {
+ // If we see {}, this is an empty pseudo-expression that doesn't
+ // count as a child.
+ if (!match(tt.braceR)) {
+ numExplicitChildren++;
+ parseExpression();
+ }
+ nextJSXExprToken();
+ }
+
+ break;
+
+ // istanbul ignore next - should never happen
+ default:
+ unexpected();
+ return;
+ }
+ }
+ }
+}
+
+// Parses entire JSX element from current position.
+// Does not parse the last token.
+export function jsxParseElement() {
+ nextJSXTagToken();
+ jsxParseElementAt();
+}
+
+// ==================================
+// Overrides
+// ==================================
+
+export function nextJSXTagToken() {
+ state.tokens.push(new Token());
+ skipSpace();
+ state.start = state.pos;
+ const code = input.charCodeAt(state.pos);
+
+ if (IS_IDENTIFIER_START[code]) {
+ jsxReadWord();
+ } else if (code === charCodes.quotationMark || code === charCodes.apostrophe) {
+ jsxReadString(code);
+ } else {
+ // The following tokens are just one character each.
+ ++state.pos;
+ switch (code) {
+ case charCodes.greaterThan:
+ finishToken(tt.jsxTagEnd);
+ break;
+ case charCodes.lessThan:
+ finishToken(tt.jsxTagStart);
+ break;
+ case charCodes.slash:
+ finishToken(tt.slash);
+ break;
+ case charCodes.equalsTo:
+ finishToken(tt.eq);
+ break;
+ case charCodes.leftCurlyBrace:
+ finishToken(tt.braceL);
+ break;
+ case charCodes.dot:
+ finishToken(tt.dot);
+ break;
+ case charCodes.colon:
+ finishToken(tt.colon);
+ break;
+ default:
+ unexpected();
+ }
+ }
+}
+
+function nextJSXExprToken() {
+ state.tokens.push(new Token());
+ state.start = state.pos;
+ jsxReadToken();
+}
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/jsx/xhtml.js b/node_modules/sucrase/dist/esm/parser/plugins/jsx/xhtml.js
new file mode 100644
index 0000000..c6a0741
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/jsx/xhtml.js
@@ -0,0 +1,256 @@
+// Use a Map rather than object to avoid unexpected __proto__ access.
+export default new Map([
+ ["quot", "\u0022"],
+ ["amp", "&"],
+ ["apos", "\u0027"],
+ ["lt", "<"],
+ ["gt", ">"],
+ ["nbsp", "\u00A0"],
+ ["iexcl", "\u00A1"],
+ ["cent", "\u00A2"],
+ ["pound", "\u00A3"],
+ ["curren", "\u00A4"],
+ ["yen", "\u00A5"],
+ ["brvbar", "\u00A6"],
+ ["sect", "\u00A7"],
+ ["uml", "\u00A8"],
+ ["copy", "\u00A9"],
+ ["ordf", "\u00AA"],
+ ["laquo", "\u00AB"],
+ ["not", "\u00AC"],
+ ["shy", "\u00AD"],
+ ["reg", "\u00AE"],
+ ["macr", "\u00AF"],
+ ["deg", "\u00B0"],
+ ["plusmn", "\u00B1"],
+ ["sup2", "\u00B2"],
+ ["sup3", "\u00B3"],
+ ["acute", "\u00B4"],
+ ["micro", "\u00B5"],
+ ["para", "\u00B6"],
+ ["middot", "\u00B7"],
+ ["cedil", "\u00B8"],
+ ["sup1", "\u00B9"],
+ ["ordm", "\u00BA"],
+ ["raquo", "\u00BB"],
+ ["frac14", "\u00BC"],
+ ["frac12", "\u00BD"],
+ ["frac34", "\u00BE"],
+ ["iquest", "\u00BF"],
+ ["Agrave", "\u00C0"],
+ ["Aacute", "\u00C1"],
+ ["Acirc", "\u00C2"],
+ ["Atilde", "\u00C3"],
+ ["Auml", "\u00C4"],
+ ["Aring", "\u00C5"],
+ ["AElig", "\u00C6"],
+ ["Ccedil", "\u00C7"],
+ ["Egrave", "\u00C8"],
+ ["Eacute", "\u00C9"],
+ ["Ecirc", "\u00CA"],
+ ["Euml", "\u00CB"],
+ ["Igrave", "\u00CC"],
+ ["Iacute", "\u00CD"],
+ ["Icirc", "\u00CE"],
+ ["Iuml", "\u00CF"],
+ ["ETH", "\u00D0"],
+ ["Ntilde", "\u00D1"],
+ ["Ograve", "\u00D2"],
+ ["Oacute", "\u00D3"],
+ ["Ocirc", "\u00D4"],
+ ["Otilde", "\u00D5"],
+ ["Ouml", "\u00D6"],
+ ["times", "\u00D7"],
+ ["Oslash", "\u00D8"],
+ ["Ugrave", "\u00D9"],
+ ["Uacute", "\u00DA"],
+ ["Ucirc", "\u00DB"],
+ ["Uuml", "\u00DC"],
+ ["Yacute", "\u00DD"],
+ ["THORN", "\u00DE"],
+ ["szlig", "\u00DF"],
+ ["agrave", "\u00E0"],
+ ["aacute", "\u00E1"],
+ ["acirc", "\u00E2"],
+ ["atilde", "\u00E3"],
+ ["auml", "\u00E4"],
+ ["aring", "\u00E5"],
+ ["aelig", "\u00E6"],
+ ["ccedil", "\u00E7"],
+ ["egrave", "\u00E8"],
+ ["eacute", "\u00E9"],
+ ["ecirc", "\u00EA"],
+ ["euml", "\u00EB"],
+ ["igrave", "\u00EC"],
+ ["iacute", "\u00ED"],
+ ["icirc", "\u00EE"],
+ ["iuml", "\u00EF"],
+ ["eth", "\u00F0"],
+ ["ntilde", "\u00F1"],
+ ["ograve", "\u00F2"],
+ ["oacute", "\u00F3"],
+ ["ocirc", "\u00F4"],
+ ["otilde", "\u00F5"],
+ ["ouml", "\u00F6"],
+ ["divide", "\u00F7"],
+ ["oslash", "\u00F8"],
+ ["ugrave", "\u00F9"],
+ ["uacute", "\u00FA"],
+ ["ucirc", "\u00FB"],
+ ["uuml", "\u00FC"],
+ ["yacute", "\u00FD"],
+ ["thorn", "\u00FE"],
+ ["yuml", "\u00FF"],
+ ["OElig", "\u0152"],
+ ["oelig", "\u0153"],
+ ["Scaron", "\u0160"],
+ ["scaron", "\u0161"],
+ ["Yuml", "\u0178"],
+ ["fnof", "\u0192"],
+ ["circ", "\u02C6"],
+ ["tilde", "\u02DC"],
+ ["Alpha", "\u0391"],
+ ["Beta", "\u0392"],
+ ["Gamma", "\u0393"],
+ ["Delta", "\u0394"],
+ ["Epsilon", "\u0395"],
+ ["Zeta", "\u0396"],
+ ["Eta", "\u0397"],
+ ["Theta", "\u0398"],
+ ["Iota", "\u0399"],
+ ["Kappa", "\u039A"],
+ ["Lambda", "\u039B"],
+ ["Mu", "\u039C"],
+ ["Nu", "\u039D"],
+ ["Xi", "\u039E"],
+ ["Omicron", "\u039F"],
+ ["Pi", "\u03A0"],
+ ["Rho", "\u03A1"],
+ ["Sigma", "\u03A3"],
+ ["Tau", "\u03A4"],
+ ["Upsilon", "\u03A5"],
+ ["Phi", "\u03A6"],
+ ["Chi", "\u03A7"],
+ ["Psi", "\u03A8"],
+ ["Omega", "\u03A9"],
+ ["alpha", "\u03B1"],
+ ["beta", "\u03B2"],
+ ["gamma", "\u03B3"],
+ ["delta", "\u03B4"],
+ ["epsilon", "\u03B5"],
+ ["zeta", "\u03B6"],
+ ["eta", "\u03B7"],
+ ["theta", "\u03B8"],
+ ["iota", "\u03B9"],
+ ["kappa", "\u03BA"],
+ ["lambda", "\u03BB"],
+ ["mu", "\u03BC"],
+ ["nu", "\u03BD"],
+ ["xi", "\u03BE"],
+ ["omicron", "\u03BF"],
+ ["pi", "\u03C0"],
+ ["rho", "\u03C1"],
+ ["sigmaf", "\u03C2"],
+ ["sigma", "\u03C3"],
+ ["tau", "\u03C4"],
+ ["upsilon", "\u03C5"],
+ ["phi", "\u03C6"],
+ ["chi", "\u03C7"],
+ ["psi", "\u03C8"],
+ ["omega", "\u03C9"],
+ ["thetasym", "\u03D1"],
+ ["upsih", "\u03D2"],
+ ["piv", "\u03D6"],
+ ["ensp", "\u2002"],
+ ["emsp", "\u2003"],
+ ["thinsp", "\u2009"],
+ ["zwnj", "\u200C"],
+ ["zwj", "\u200D"],
+ ["lrm", "\u200E"],
+ ["rlm", "\u200F"],
+ ["ndash", "\u2013"],
+ ["mdash", "\u2014"],
+ ["lsquo", "\u2018"],
+ ["rsquo", "\u2019"],
+ ["sbquo", "\u201A"],
+ ["ldquo", "\u201C"],
+ ["rdquo", "\u201D"],
+ ["bdquo", "\u201E"],
+ ["dagger", "\u2020"],
+ ["Dagger", "\u2021"],
+ ["bull", "\u2022"],
+ ["hellip", "\u2026"],
+ ["permil", "\u2030"],
+ ["prime", "\u2032"],
+ ["Prime", "\u2033"],
+ ["lsaquo", "\u2039"],
+ ["rsaquo", "\u203A"],
+ ["oline", "\u203E"],
+ ["frasl", "\u2044"],
+ ["euro", "\u20AC"],
+ ["image", "\u2111"],
+ ["weierp", "\u2118"],
+ ["real", "\u211C"],
+ ["trade", "\u2122"],
+ ["alefsym", "\u2135"],
+ ["larr", "\u2190"],
+ ["uarr", "\u2191"],
+ ["rarr", "\u2192"],
+ ["darr", "\u2193"],
+ ["harr", "\u2194"],
+ ["crarr", "\u21B5"],
+ ["lArr", "\u21D0"],
+ ["uArr", "\u21D1"],
+ ["rArr", "\u21D2"],
+ ["dArr", "\u21D3"],
+ ["hArr", "\u21D4"],
+ ["forall", "\u2200"],
+ ["part", "\u2202"],
+ ["exist", "\u2203"],
+ ["empty", "\u2205"],
+ ["nabla", "\u2207"],
+ ["isin", "\u2208"],
+ ["notin", "\u2209"],
+ ["ni", "\u220B"],
+ ["prod", "\u220F"],
+ ["sum", "\u2211"],
+ ["minus", "\u2212"],
+ ["lowast", "\u2217"],
+ ["radic", "\u221A"],
+ ["prop", "\u221D"],
+ ["infin", "\u221E"],
+ ["ang", "\u2220"],
+ ["and", "\u2227"],
+ ["or", "\u2228"],
+ ["cap", "\u2229"],
+ ["cup", "\u222A"],
+ ["int", "\u222B"],
+ ["there4", "\u2234"],
+ ["sim", "\u223C"],
+ ["cong", "\u2245"],
+ ["asymp", "\u2248"],
+ ["ne", "\u2260"],
+ ["equiv", "\u2261"],
+ ["le", "\u2264"],
+ ["ge", "\u2265"],
+ ["sub", "\u2282"],
+ ["sup", "\u2283"],
+ ["nsub", "\u2284"],
+ ["sube", "\u2286"],
+ ["supe", "\u2287"],
+ ["oplus", "\u2295"],
+ ["otimes", "\u2297"],
+ ["perp", "\u22A5"],
+ ["sdot", "\u22C5"],
+ ["lceil", "\u2308"],
+ ["rceil", "\u2309"],
+ ["lfloor", "\u230A"],
+ ["rfloor", "\u230B"],
+ ["lang", "\u2329"],
+ ["rang", "\u232A"],
+ ["loz", "\u25CA"],
+ ["spades", "\u2660"],
+ ["clubs", "\u2663"],
+ ["hearts", "\u2665"],
+ ["diams", "\u2666"],
+]);
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/types.js b/node_modules/sucrase/dist/esm/parser/plugins/types.js
new file mode 100644
index 0000000..78e4af4
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/types.js
@@ -0,0 +1,37 @@
+import {eatTypeToken, lookaheadType, match} from "../tokenizer/index";
+import {TokenType as tt} from "../tokenizer/types";
+import {isFlowEnabled, isTypeScriptEnabled} from "../traverser/base";
+import {baseParseConditional} from "../traverser/expression";
+import {flowParseTypeAnnotation} from "./flow";
+import {tsParseTypeAnnotation} from "./typescript";
+
+/**
+ * Common parser code for TypeScript and Flow.
+ */
+
+// An apparent conditional expression could actually be an optional parameter in an arrow function.
+export function typedParseConditional(noIn) {
+ // If we see ?:, this can't possibly be a valid conditional. typedParseParenItem will be called
+ // later to finish off the arrow parameter. We also need to handle bare ? tokens for optional
+ // parameters without type annotations, i.e. ?, and ?) .
+ if (match(tt.question)) {
+ const nextType = lookaheadType();
+ if (nextType === tt.colon || nextType === tt.comma || nextType === tt.parenR) {
+ return;
+ }
+ }
+ baseParseConditional(noIn);
+}
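+// For example (illustrative): while parsing `(a?: number) => a`, after `a` we
+// see `?` followed by `:`, which can never start a conditional, so we return and
+// let typedParseParenItem finish the optional parameter; in `x ? y : z` the
+// token after `?` is a name, so baseParseConditional runs as usual.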
+
+// Note: These "type casts" are *not* valid TS expressions.
+// But we parse them here and change them when completing the arrow function.
+export function typedParseParenItem() {
+ eatTypeToken(tt.question);
+ if (match(tt.colon)) {
+ if (isTypeScriptEnabled) {
+ tsParseTypeAnnotation();
+ } else if (isFlowEnabled) {
+ flowParseTypeAnnotation();
+ }
+ }
+}
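+// For example (illustrative): `(x: number, y?) => x` is parsed by treating
+// `x: number` and `y?` as paren items with a type annotation and a bare `?`;
+// neither is a valid standalone expression, but the tokens are adjusted once
+// the trailing `=>` shows this was an arrow function parameter list.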
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/typescript.js b/node_modules/sucrase/dist/esm/parser/plugins/typescript.js
new file mode 100644
index 0000000..f64ca67
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/typescript.js
@@ -0,0 +1,1632 @@
+import {
+ eat,
+ finishToken,
+ IdentifierRole,
+ lookaheadType,
+ lookaheadTypeAndKeyword,
+ match,
+ next,
+ nextTemplateToken,
+ popTypeContext,
+ pushTypeContext,
+ rescan_gt,
+} from "../tokenizer/index";
+import {ContextualKeyword} from "../tokenizer/keywords";
+import {TokenType, TokenType as tt} from "../tokenizer/types";
+import {isJSXEnabled, state} from "../traverser/base";
+import {
+ atPossibleAsync,
+ baseParseMaybeAssign,
+ baseParseSubscript,
+ parseCallExpressionArguments,
+ parseExprAtom,
+ parseExpression,
+ parseFunctionBody,
+ parseIdentifier,
+ parseLiteral,
+ parseMaybeAssign,
+ parseMaybeUnary,
+ parsePropertyName,
+ parseTemplate,
+
+} from "../traverser/expression";
+import {parseBindingIdentifier, parseBindingList, parseImportedIdentifier} from "../traverser/lval";
+import {
+ baseParseMaybeDecoratorArguments,
+ parseBlockBody,
+ parseClass,
+ parseFunction,
+ parseFunctionParams,
+ parseStatement,
+ parseVarStatement,
+} from "../traverser/statement";
+import {
+ canInsertSemicolon,
+ eatContextual,
+ expect,
+ expectContextual,
+ hasPrecedingLineBreak,
+ isContextual,
+ isLineTerminator,
+ isLookaheadContextual,
+ semicolon,
+ unexpected,
+} from "../traverser/util";
+import {nextJSXTagToken} from "./jsx";
+
+function tsIsIdentifier() {
+ // TODO: actually a bit more complex in TypeScript, but shouldn't matter.
+ // See https://github.com/Microsoft/TypeScript/issues/15008
+ return match(tt.name);
+}
+
+function isLiteralPropertyName() {
+ return (
+ match(tt.name) ||
+ Boolean(state.type & TokenType.IS_KEYWORD) ||
+ match(tt.string) ||
+ match(tt.num) ||
+ match(tt.bigint) ||
+ match(tt.decimal)
+ );
+}
+
+function tsNextTokenCanFollowModifier() {
+ // Note: TypeScript's implementation is much more complicated because
+ // more things are considered modifiers there.
+  // This implementation only handles modifiers not handled by babylon itself, plus "static".
+ // TODO: Would be nice to avoid lookahead. Want a hasLineBreakUpNext() method...
+ const snapshot = state.snapshot();
+
+ next();
+ const canFollowModifier =
+ (match(tt.bracketL) ||
+ match(tt.braceL) ||
+ match(tt.star) ||
+ match(tt.ellipsis) ||
+ match(tt.hash) ||
+ isLiteralPropertyName()) &&
+ !hasPrecedingLineBreak();
+
+ if (canFollowModifier) {
+ return true;
+ } else {
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ }
+}
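+// For example (illustrative): in `{ readonly x: string }`, the token after
+// `readonly` is the property name `x` on the same line, so `readonly` is a
+// modifier; in `{ readonly: string }`, the next token is `:`, which cannot
+// follow a modifier, so the snapshot is restored and `readonly` is parsed as the
+// property name itself.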
+
+export function tsParseModifiers(allowedModifiers) {
+ while (true) {
+ const modifier = tsParseModifier(allowedModifiers);
+ if (modifier === null) {
+ break;
+ }
+ }
+}
+
+/** Parses a modifier matching one of the given modifier names. */
+export function tsParseModifier(
+ allowedModifiers,
+) {
+ if (!match(tt.name)) {
+ return null;
+ }
+
+ const modifier = state.contextualKeyword;
+ if (allowedModifiers.indexOf(modifier) !== -1 && tsNextTokenCanFollowModifier()) {
+ switch (modifier) {
+ case ContextualKeyword._readonly:
+ state.tokens[state.tokens.length - 1].type = tt._readonly;
+ break;
+ case ContextualKeyword._abstract:
+ state.tokens[state.tokens.length - 1].type = tt._abstract;
+ break;
+ case ContextualKeyword._static:
+ state.tokens[state.tokens.length - 1].type = tt._static;
+ break;
+ case ContextualKeyword._public:
+ state.tokens[state.tokens.length - 1].type = tt._public;
+ break;
+ case ContextualKeyword._private:
+ state.tokens[state.tokens.length - 1].type = tt._private;
+ break;
+ case ContextualKeyword._protected:
+ state.tokens[state.tokens.length - 1].type = tt._protected;
+ break;
+ case ContextualKeyword._override:
+ state.tokens[state.tokens.length - 1].type = tt._override;
+ break;
+ case ContextualKeyword._declare:
+ state.tokens[state.tokens.length - 1].type = tt._declare;
+ break;
+ default:
+ break;
+ }
+ return modifier;
+ }
+ return null;
+}
+
+function tsParseEntityName() {
+ parseIdentifier();
+ while (eat(tt.dot)) {
+ parseIdentifier();
+ }
+}
+
+function tsParseTypeReference() {
+ tsParseEntityName();
+ if (!hasPrecedingLineBreak() && match(tt.lessThan)) {
+ tsParseTypeArguments();
+ }
+}
+
+function tsParseThisTypePredicate() {
+ next();
+ tsParseTypeAnnotation();
+}
+
+function tsParseThisTypeNode() {
+ next();
+}
+
+function tsParseTypeQuery() {
+ expect(tt._typeof);
+ if (match(tt._import)) {
+ tsParseImportType();
+ } else {
+ tsParseEntityName();
+ }
+ if (!hasPrecedingLineBreak() && match(tt.lessThan)) {
+ tsParseTypeArguments();
+ }
+}
+
+function tsParseImportType() {
+ expect(tt._import);
+ expect(tt.parenL);
+ expect(tt.string);
+ expect(tt.parenR);
+ if (eat(tt.dot)) {
+ tsParseEntityName();
+ }
+ if (match(tt.lessThan)) {
+ tsParseTypeArguments();
+ }
+}
+
+function tsParseTypeParameter() {
+ eat(tt._const);
+ const hadIn = eat(tt._in);
+ const hadOut = eatContextual(ContextualKeyword._out);
+ eat(tt._const);
+ if ((hadIn || hadOut) && !match(tt.name)) {
+ // The "in" or "out" keyword must have actually been the type parameter
+ // name, so set it as the name.
+ state.tokens[state.tokens.length - 1].type = tt.name;
+ } else {
+ parseIdentifier();
+ }
+
+ if (eat(tt._extends)) {
+ tsParseType();
+ }
+ if (eat(tt.eq)) {
+ tsParseType();
+ }
+}
+
+export function tsTryParseTypeParameters() {
+ if (match(tt.lessThan)) {
+ tsParseTypeParameters();
+ }
+}
+
+function tsParseTypeParameters() {
+ const oldIsType = pushTypeContext(0);
+ if (match(tt.lessThan) || match(tt.typeParameterStart)) {
+ next();
+ } else {
+ unexpected();
+ }
+
+ while (!eat(tt.greaterThan) && !state.error) {
+ tsParseTypeParameter();
+ eat(tt.comma);
+ }
+ popTypeContext(oldIsType);
+}
+
+// Note: In the TypeScript implementation we must provide `yieldContext` and `awaitContext`,
+// but here it's always false, because this is only used for types.
+function tsFillSignature(returnToken) {
+ // Arrow fns *must* have return token (`=>`). Normal functions can omit it.
+ const returnTokenRequired = returnToken === tt.arrow;
+ tsTryParseTypeParameters();
+ expect(tt.parenL);
+ // Create a scope even though we're doing type parsing so we don't accidentally
+ // treat params as top-level bindings.
+ state.scopeDepth++;
+ tsParseBindingListForSignature(false /* isBlockScope */);
+ state.scopeDepth--;
+ if (returnTokenRequired) {
+ tsParseTypeOrTypePredicateAnnotation(returnToken);
+ } else if (match(returnToken)) {
+ tsParseTypeOrTypePredicateAnnotation(returnToken);
+ }
+}
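+// For example (illustrative): the function type `(x: number) => string` must
+// include the `=> string` part (returnToken is tt.arrow), while an interface
+// member such as `foo(x: number);` may omit its `: ReturnType` annotation
+// (returnToken is tt.colon, which is only parsed when present).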
+
+function tsParseBindingListForSignature(isBlockScope) {
+ parseBindingList(tt.parenR, isBlockScope);
+}
+
+function tsParseTypeMemberSemicolon() {
+ if (!eat(tt.comma)) {
+ semicolon();
+ }
+}
+
+function tsParseSignatureMember() {
+ tsFillSignature(tt.colon);
+ tsParseTypeMemberSemicolon();
+}
+
+function tsIsUnambiguouslyIndexSignature() {
+ const snapshot = state.snapshot();
+  next(); // Skip '['
+ const isIndexSignature = eat(tt.name) && match(tt.colon);
+ state.restoreFromSnapshot(snapshot);
+ return isIndexSignature;
+}
+
+function tsTryParseIndexSignature() {
+ if (!(match(tt.bracketL) && tsIsUnambiguouslyIndexSignature())) {
+ return false;
+ }
+
+ const oldIsType = pushTypeContext(0);
+
+ expect(tt.bracketL);
+ parseIdentifier();
+ tsParseTypeAnnotation();
+ expect(tt.bracketR);
+
+ tsTryParseTypeAnnotation();
+ tsParseTypeMemberSemicolon();
+
+ popTypeContext(oldIsType);
+ return true;
+}
+
+function tsParsePropertyOrMethodSignature(isReadonly) {
+ eat(tt.question);
+
+ if (!isReadonly && (match(tt.parenL) || match(tt.lessThan))) {
+ tsFillSignature(tt.colon);
+ tsParseTypeMemberSemicolon();
+ } else {
+ tsTryParseTypeAnnotation();
+ tsParseTypeMemberSemicolon();
+ }
+}
+
+function tsParseTypeMember() {
+ if (match(tt.parenL) || match(tt.lessThan)) {
+ // call signature
+ tsParseSignatureMember();
+ return;
+ }
+ if (match(tt._new)) {
+ next();
+ if (match(tt.parenL) || match(tt.lessThan)) {
+ // constructor signature
+ tsParseSignatureMember();
+ } else {
+ tsParsePropertyOrMethodSignature(false);
+ }
+ return;
+ }
+ const readonly = !!tsParseModifier([ContextualKeyword._readonly]);
+
+ const found = tsTryParseIndexSignature();
+ if (found) {
+ return;
+ }
+ if (
+ (isContextual(ContextualKeyword._get) || isContextual(ContextualKeyword._set)) &&
+ tsNextTokenCanFollowModifier()
+ ) {
+ // This is a getter/setter on a type. The tsNextTokenCanFollowModifier
+ // function already called next() for us, so continue parsing the name.
+ }
+ parsePropertyName(-1 /* Types don't need context IDs. */);
+ tsParsePropertyOrMethodSignature(readonly);
+}
+
+function tsParseTypeLiteral() {
+ tsParseObjectTypeMembers();
+}
+
+function tsParseObjectTypeMembers() {
+ expect(tt.braceL);
+ while (!eat(tt.braceR) && !state.error) {
+ tsParseTypeMember();
+ }
+}
+
+function tsLookaheadIsStartOfMappedType() {
+ const snapshot = state.snapshot();
+ const isStartOfMappedType = tsIsStartOfMappedType();
+ state.restoreFromSnapshot(snapshot);
+ return isStartOfMappedType;
+}
+
+function tsIsStartOfMappedType() {
+ next();
+ if (eat(tt.plus) || eat(tt.minus)) {
+ return isContextual(ContextualKeyword._readonly);
+ }
+ if (isContextual(ContextualKeyword._readonly)) {
+ next();
+ }
+ if (!match(tt.bracketL)) {
+ return false;
+ }
+ next();
+ if (!tsIsIdentifier()) {
+ return false;
+ }
+ next();
+ return match(tt._in);
+}
+
+function tsParseMappedTypeParameter() {
+ parseIdentifier();
+ expect(tt._in);
+ tsParseType();
+}
+
+function tsParseMappedType() {
+ expect(tt.braceL);
+ if (match(tt.plus) || match(tt.minus)) {
+ next();
+ expectContextual(ContextualKeyword._readonly);
+ } else {
+ eatContextual(ContextualKeyword._readonly);
+ }
+ expect(tt.bracketL);
+ tsParseMappedTypeParameter();
+ if (eatContextual(ContextualKeyword._as)) {
+ tsParseType();
+ }
+ expect(tt.bracketR);
+ if (match(tt.plus) || match(tt.minus)) {
+ next();
+ expect(tt.question);
+ } else {
+ eat(tt.question);
+ }
+ tsTryParseType();
+ semicolon();
+ expect(tt.braceR);
+}
+
+function tsParseTupleType() {
+ expect(tt.bracketL);
+ while (!eat(tt.bracketR) && !state.error) {
+    // We do not validate that elements are either all labeled or all unlabeled.
+ tsParseTupleElementType();
+ eat(tt.comma);
+ }
+}
+
+function tsParseTupleElementType() {
+ // parses `...TsType[]`
+ if (eat(tt.ellipsis)) {
+ tsParseType();
+ } else {
+ // parses `TsType?`
+ tsParseType();
+ eat(tt.question);
+ }
+
+ // The type we parsed above was actually a label
+ if (eat(tt.colon)) {
+    // In labeled tuple elements, any `...` or `?` is attached to the label (before the colon), so there is no need to handle them here.
+ tsParseType();
+ }
+}
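+// For example (illustrative): in
+//   [name: string, age?: number, ...rest: boolean[]]
+// each element first parses `name`, `age`, or `rest` as if it were a type; the
+// `:` that follows reveals it was actually a label, and the real element type is
+// parsed after it.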
+
+function tsParseParenthesizedType() {
+ expect(tt.parenL);
+ tsParseType();
+ expect(tt.parenR);
+}
+
+function tsParseTemplateLiteralType() {
+ // Finish `, read quasi
+ nextTemplateToken();
+ // Finish quasi, read ${
+ nextTemplateToken();
+ while (!match(tt.backQuote) && !state.error) {
+ expect(tt.dollarBraceL);
+ tsParseType();
+ // Finish }, read quasi
+ nextTemplateToken();
+ // Finish quasi, read either ${ or `
+ nextTemplateToken();
+ }
+ next();
+}
+
+var FunctionType; (function (FunctionType) {
+ const TSFunctionType = 0; FunctionType[FunctionType["TSFunctionType"] = TSFunctionType] = "TSFunctionType";
+ const TSConstructorType = TSFunctionType + 1; FunctionType[FunctionType["TSConstructorType"] = TSConstructorType] = "TSConstructorType";
+ const TSAbstractConstructorType = TSConstructorType + 1; FunctionType[FunctionType["TSAbstractConstructorType"] = TSAbstractConstructorType] = "TSAbstractConstructorType";
+})(FunctionType || (FunctionType = {}));
+
+function tsParseFunctionOrConstructorType(type) {
+ if (type === FunctionType.TSAbstractConstructorType) {
+ expectContextual(ContextualKeyword._abstract);
+ }
+ if (type === FunctionType.TSConstructorType || type === FunctionType.TSAbstractConstructorType) {
+ expect(tt._new);
+ }
+ const oldInDisallowConditionalTypesContext = state.inDisallowConditionalTypesContext;
+ state.inDisallowConditionalTypesContext = false;
+ tsFillSignature(tt.arrow);
+ state.inDisallowConditionalTypesContext = oldInDisallowConditionalTypesContext;
+}
+
+function tsParseNonArrayType() {
+ switch (state.type) {
+ case tt.name:
+ tsParseTypeReference();
+ return;
+ case tt._void:
+ case tt._null:
+ next();
+ return;
+ case tt.string:
+ case tt.num:
+ case tt.bigint:
+ case tt.decimal:
+ case tt._true:
+ case tt._false:
+ parseLiteral();
+ return;
+ case tt.minus:
+ next();
+ parseLiteral();
+ return;
+ case tt._this: {
+ tsParseThisTypeNode();
+ if (isContextual(ContextualKeyword._is) && !hasPrecedingLineBreak()) {
+ tsParseThisTypePredicate();
+ }
+ return;
+ }
+ case tt._typeof:
+ tsParseTypeQuery();
+ return;
+ case tt._import:
+ tsParseImportType();
+ return;
+ case tt.braceL:
+ if (tsLookaheadIsStartOfMappedType()) {
+ tsParseMappedType();
+ } else {
+ tsParseTypeLiteral();
+ }
+ return;
+ case tt.bracketL:
+ tsParseTupleType();
+ return;
+ case tt.parenL:
+ tsParseParenthesizedType();
+ return;
+ case tt.backQuote:
+ tsParseTemplateLiteralType();
+ return;
+ default:
+ if (state.type & TokenType.IS_KEYWORD) {
+ next();
+ state.tokens[state.tokens.length - 1].type = tt.name;
+ return;
+ }
+ break;
+ }
+
+ unexpected();
+}
+
+function tsParseArrayTypeOrHigher() {
+ tsParseNonArrayType();
+ while (!hasPrecedingLineBreak() && eat(tt.bracketL)) {
+ if (!eat(tt.bracketR)) {
+ // If we hit ] immediately, this is an array type, otherwise it's an indexed access type.
+ tsParseType();
+ expect(tt.bracketR);
+ }
+ }
+}
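+// For reference, the loop above handles both array types and indexed access types
+// (illustrative examples; `Config` is a hypothetical name):
+//   type A = string[];        // `]` immediately: array type
+//   type P = Config["port"];  // type before `]`: indexed access type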
+
+function tsParseInferType() {
+ expectContextual(ContextualKeyword._infer);
+ parseIdentifier();
+ if (match(tt._extends)) {
+ // Infer type constraints introduce an ambiguity about whether the "extends"
+ // is a constraint for this infer type or is another conditional type.
+ const snapshot = state.snapshot();
+ expect(tt._extends);
+ const oldInDisallowConditionalTypesContext = state.inDisallowConditionalTypesContext;
+ state.inDisallowConditionalTypesContext = true;
+ tsParseType();
+ state.inDisallowConditionalTypesContext = oldInDisallowConditionalTypesContext;
+ if (state.error || (!state.inDisallowConditionalTypesContext && match(tt.question))) {
+ state.restoreFromSnapshot(snapshot);
+ }
+ }
+}
+
+function tsParseTypeOperatorOrHigher() {
+ if (
+ isContextual(ContextualKeyword._keyof) ||
+ isContextual(ContextualKeyword._unique) ||
+ isContextual(ContextualKeyword._readonly)
+ ) {
+ next();
+ tsParseTypeOperatorOrHigher();
+ } else if (isContextual(ContextualKeyword._infer)) {
+ tsParseInferType();
+ } else {
+ const oldInDisallowConditionalTypesContext = state.inDisallowConditionalTypesContext;
+ state.inDisallowConditionalTypesContext = false;
+ tsParseArrayTypeOrHigher();
+ state.inDisallowConditionalTypesContext = oldInDisallowConditionalTypesContext;
+ }
+}
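+// For reference, the type operators consumed above (illustrative examples;
+// `Options` is a hypothetical name):
+//   type Keys = keyof Options;
+//   type Frozen = readonly string[];
+//   declare const tag: unique symbol;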
+
+function tsParseIntersectionTypeOrHigher() {
+ eat(tt.bitwiseAND);
+ tsParseTypeOperatorOrHigher();
+ if (match(tt.bitwiseAND)) {
+ while (eat(tt.bitwiseAND)) {
+ tsParseTypeOperatorOrHigher();
+ }
+ }
+}
+
+function tsParseUnionTypeOrHigher() {
+ eat(tt.bitwiseOR);
+ tsParseIntersectionTypeOrHigher();
+ if (match(tt.bitwiseOR)) {
+ while (eat(tt.bitwiseOR)) {
+ tsParseIntersectionTypeOrHigher();
+ }
+ }
+}
+
+function tsIsStartOfFunctionType() {
+ if (match(tt.lessThan)) {
+ return true;
+ }
+ return match(tt.parenL) && tsLookaheadIsUnambiguouslyStartOfFunctionType();
+}
+
+function tsSkipParameterStart() {
+ if (match(tt.name) || match(tt._this)) {
+ next();
+ return true;
+ }
+ // If this is a possible array/object destructure, walk to the matching bracket/brace.
+ // The next token after will tell us definitively whether this is a function param.
+ if (match(tt.braceL) || match(tt.bracketL)) {
+ let depth = 1;
+ next();
+ while (depth > 0 && !state.error) {
+ if (match(tt.braceL) || match(tt.bracketL)) {
+ depth++;
+ } else if (match(tt.braceR) || match(tt.bracketR)) {
+ depth--;
+ }
+ next();
+ }
+ return true;
+ }
+ return false;
+}
+
+function tsLookaheadIsUnambiguouslyStartOfFunctionType() {
+ const snapshot = state.snapshot();
+ const isUnambiguouslyStartOfFunctionType = tsIsUnambiguouslyStartOfFunctionType();
+ state.restoreFromSnapshot(snapshot);
+ return isUnambiguouslyStartOfFunctionType;
+}
+
+function tsIsUnambiguouslyStartOfFunctionType() {
+ next();
+ if (match(tt.parenR) || match(tt.ellipsis)) {
+ // ( )
+ // ( ...
+ return true;
+ }
+ if (tsSkipParameterStart()) {
+ if (match(tt.colon) || match(tt.comma) || match(tt.question) || match(tt.eq)) {
+ // ( xxx :
+ // ( xxx ,
+ // ( xxx ?
+ // ( xxx =
+ return true;
+ }
+ if (match(tt.parenR)) {
+ next();
+ if (match(tt.arrow)) {
+ // ( xxx ) =>
+ return true;
+ }
+ }
+ }
+ return false;
+}
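+// For reference, the lookahead above is what distinguishes a function type from a
+// parenthesized type (illustrative examples):
+//   type F = (x: number, ...rest: string[]) => void;  // function type
+//   type G = (string | number)[];                     // parenthesized type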
+
+function tsParseTypeOrTypePredicateAnnotation(returnToken) {
+ const oldIsType = pushTypeContext(0);
+ expect(returnToken);
+ const finishedReturn = tsParseTypePredicateOrAssertsPrefix();
+ if (!finishedReturn) {
+ tsParseType();
+ }
+ popTypeContext(oldIsType);
+}
+
+function tsTryParseTypeOrTypePredicateAnnotation() {
+ if (match(tt.colon)) {
+ tsParseTypeOrTypePredicateAnnotation(tt.colon);
+ }
+}
+
+export function tsTryParseTypeAnnotation() {
+ if (match(tt.colon)) {
+ tsParseTypeAnnotation();
+ }
+}
+
+function tsTryParseType() {
+ if (eat(tt.colon)) {
+ tsParseType();
+ }
+}
+
+/**
+ * Detect a few special return syntax cases: `x is T`, `asserts x`, `asserts x is T`,
+ * `asserts this is T`.
+ *
+ * Returns true if we parsed the return type, false if there's still a type to be parsed.
+ */
+function tsParseTypePredicateOrAssertsPrefix() {
+ const snapshot = state.snapshot();
+ if (isContextual(ContextualKeyword._asserts)) {
+ // Normally this is `asserts x is T`, but at this point, it might be `asserts is T` (a user-
+ // defined type guard on the `asserts` variable) or just a type called `asserts`.
+ next();
+ if (eatContextual(ContextualKeyword._is)) {
+ // If we see `asserts is`, then this must be of the form `asserts is T`, since
+ // `asserts is is T` isn't valid.
+ tsParseType();
+ return true;
+ } else if (tsIsIdentifier() || match(tt._this)) {
+ next();
+ if (eatContextual(ContextualKeyword._is)) {
+ // If we see `is`, then this is `asserts x is T`. Otherwise, it's `asserts x`.
+ tsParseType();
+ }
+ return true;
+ } else {
+ // Regular type, so bail out and start type parsing from scratch.
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ }
+ } else if (tsIsIdentifier() || match(tt._this)) {
+ // This is a regular identifier, which may or may not have "is" after it.
+ next();
+ if (isContextual(ContextualKeyword._is) && !hasPrecedingLineBreak()) {
+ next();
+ tsParseType();
+ return true;
+ } else {
+ // Regular type, so bail out and start type parsing from scratch.
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ }
+ }
+ return false;
+}
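+// For reference, the return-type forms recognized above (illustrative examples;
+// `Fish` is a hypothetical name):
+//   declare function isFish(pet: unknown): pet is Fish;
+//   declare function assertOk(cond: unknown): asserts cond;
+//   declare function assertIsFish(pet: unknown): asserts pet is Fish;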
+
+export function tsParseTypeAnnotation() {
+ const oldIsType = pushTypeContext(0);
+ expect(tt.colon);
+ tsParseType();
+ popTypeContext(oldIsType);
+}
+
+export function tsParseType() {
+ tsParseNonConditionalType();
+ if (state.inDisallowConditionalTypesContext || hasPrecedingLineBreak() || !eat(tt._extends)) {
+ return;
+ }
+ // extends type
+ const oldInDisallowConditionalTypesContext = state.inDisallowConditionalTypesContext;
+ state.inDisallowConditionalTypesContext = true;
+ tsParseNonConditionalType();
+ state.inDisallowConditionalTypesContext = oldInDisallowConditionalTypesContext;
+
+ expect(tt.question);
+ // true type
+ tsParseType();
+ expect(tt.colon);
+ // false type
+ tsParseType();
+}
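+// For reference, a conditional type with `infer`, as handled above and by
+// tsParseInferType (illustrative example):
+//   type ElementOf<T> = T extends (infer U)[] ? U : never;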
+
+function isAbstractConstructorSignature() {
+ return isContextual(ContextualKeyword._abstract) && lookaheadType() === tt._new;
+}
+
+export function tsParseNonConditionalType() {
+ if (tsIsStartOfFunctionType()) {
+ tsParseFunctionOrConstructorType(FunctionType.TSFunctionType);
+ return;
+ }
+ if (match(tt._new)) {
+ // As in `new () => Date`
+ tsParseFunctionOrConstructorType(FunctionType.TSConstructorType);
+ return;
+ } else if (isAbstractConstructorSignature()) {
+ // As in `abstract new () => Date`
+ tsParseFunctionOrConstructorType(FunctionType.TSAbstractConstructorType);
+ return;
+ }
+ tsParseUnionTypeOrHigher();
+}
+
+export function tsParseTypeAssertion() {
+ const oldIsType = pushTypeContext(1);
+ tsParseType();
+ expect(tt.greaterThan);
+ popTypeContext(oldIsType);
+ parseMaybeUnary();
+}
+
+export function tsTryParseJSXTypeArgument() {
+ if (eat(tt.jsxTagStart)) {
+ state.tokens[state.tokens.length - 1].type = tt.typeParameterStart;
+ const oldIsType = pushTypeContext(1);
+ while (!match(tt.greaterThan) && !state.error) {
+ tsParseType();
+ eat(tt.comma);
+ }
+ // Process >, but the one after needs to be parsed JSX-style.
+ nextJSXTagToken();
+ popTypeContext(oldIsType);
+ }
+}
+
+function tsParseHeritageClause() {
+ while (!match(tt.braceL) && !state.error) {
+ tsParseExpressionWithTypeArguments();
+ eat(tt.comma);
+ }
+}
+
+function tsParseExpressionWithTypeArguments() {
+ // Note: TS uses parseLeftHandSideExpressionOrHigher,
+ // then has grammar errors later if it's not an EntityName.
+ tsParseEntityName();
+ if (match(tt.lessThan)) {
+ tsParseTypeArguments();
+ }
+}
+
+function tsParseInterfaceDeclaration() {
+ parseBindingIdentifier(false);
+ tsTryParseTypeParameters();
+ if (eat(tt._extends)) {
+ tsParseHeritageClause();
+ }
+ tsParseObjectTypeMembers();
+}
+
+function tsParseTypeAliasDeclaration() {
+ parseBindingIdentifier(false);
+ tsTryParseTypeParameters();
+ expect(tt.eq);
+ tsParseType();
+ semicolon();
+}
+
+function tsParseEnumMember() {
+ // Computed property names are grammar errors in an enum, so accept just string literal or identifier.
+ if (match(tt.string)) {
+ parseLiteral();
+ } else {
+ parseIdentifier();
+ }
+ if (eat(tt.eq)) {
+ const eqIndex = state.tokens.length - 1;
+ parseMaybeAssign();
+ state.tokens[eqIndex].rhsEndIndex = state.tokens.length;
+ }
+}
+
+function tsParseEnumDeclaration() {
+ parseBindingIdentifier(false);
+ expect(tt.braceL);
+ while (!eat(tt.braceR) && !state.error) {
+ tsParseEnumMember();
+ eat(tt.comma);
+ }
+}
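+// For reference, enum members may be identifiers or string literals, optionally
+// initialized (illustrative example with hypothetical names):
+//   enum Status { Ok, NotFound = 404, "bad-request" = 400 }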
+
+function tsParseModuleBlock() {
+ expect(tt.braceL);
+ parseBlockBody(/* end */ tt.braceR);
+}
+
+function tsParseModuleOrNamespaceDeclaration() {
+ parseBindingIdentifier(false);
+ if (eat(tt.dot)) {
+ tsParseModuleOrNamespaceDeclaration();
+ } else {
+ tsParseModuleBlock();
+ }
+}
+
+function tsParseAmbientExternalModuleDeclaration() {
+ if (isContextual(ContextualKeyword._global)) {
+ parseIdentifier();
+ } else if (match(tt.string)) {
+ parseExprAtom();
+ } else {
+ unexpected();
+ }
+
+ if (match(tt.braceL)) {
+ tsParseModuleBlock();
+ } else {
+ semicolon();
+ }
+}
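+// For reference, the module/namespace declarations handled above (illustrative
+// examples with hypothetical names):
+//   namespace App.Config { }
+//   declare module "some-untyped-lib" { }
+//   declare global { }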
+
+export function tsParseImportEqualsDeclaration() {
+ parseImportedIdentifier();
+ expect(tt.eq);
+ tsParseModuleReference();
+ semicolon();
+}
+
+function tsIsExternalModuleReference() {
+ return isContextual(ContextualKeyword._require) && lookaheadType() === tt.parenL;
+}
+
+function tsParseModuleReference() {
+ if (tsIsExternalModuleReference()) {
+ tsParseExternalModuleReference();
+ } else {
+ tsParseEntityName();
+ }
+}
+
+function tsParseExternalModuleReference() {
+ expectContextual(ContextualKeyword._require);
+ expect(tt.parenL);
+ if (!match(tt.string)) {
+ unexpected();
+ }
+ parseLiteral();
+ expect(tt.parenR);
+}
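+// For reference, the `import =` forms handled above (illustrative examples;
+// `Geometry.Shapes` is a hypothetical name):
+//   import Shapes = Geometry.Shapes;
+//   import fs = require("fs");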
+
+// Utilities
+
+// Returns true if a statement matched.
+function tsTryParseDeclare() {
+ if (isLineTerminator()) {
+ return false;
+ }
+ switch (state.type) {
+ case tt._function: {
+ const oldIsType = pushTypeContext(1);
+ next();
+ // We don't need to precisely get the function start here, since it's only used to mark
+ // the function as a type if it's bodiless, and it's already a type here.
+ const functionStart = state.start;
+ parseFunction(functionStart, /* isStatement */ true);
+ popTypeContext(oldIsType);
+ return true;
+ }
+ case tt._class: {
+ const oldIsType = pushTypeContext(1);
+ parseClass(/* isStatement */ true, /* optionalId */ false);
+ popTypeContext(oldIsType);
+ return true;
+ }
+ case tt._const: {
+ if (match(tt._const) && isLookaheadContextual(ContextualKeyword._enum)) {
+ const oldIsType = pushTypeContext(1);
+ // `const enum = 0;` not allowed because "enum" is a strict mode reserved word.
+ expect(tt._const);
+ expectContextual(ContextualKeyword._enum);
+ state.tokens[state.tokens.length - 1].type = tt._enum;
+ tsParseEnumDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ }
+ // falls through
+ case tt._var:
+ case tt._let: {
+ const oldIsType = pushTypeContext(1);
+ parseVarStatement(state.type !== tt._var);
+ popTypeContext(oldIsType);
+ return true;
+ }
+ case tt.name: {
+ const oldIsType = pushTypeContext(1);
+ const contextualKeyword = state.contextualKeyword;
+ let matched = false;
+ if (contextualKeyword === ContextualKeyword._global) {
+ tsParseAmbientExternalModuleDeclaration();
+ matched = true;
+ } else {
+ matched = tsParseDeclaration(contextualKeyword, /* isBeforeToken */ true);
+ }
+ popTypeContext(oldIsType);
+ return matched;
+ }
+ default:
+ return false;
+ }
+}
+
+// Note: this won't be called unless the keyword is allowed in `shouldParseExportDeclaration`.
+// Returns true if it matched a declaration.
+function tsTryParseExportDeclaration() {
+ return tsParseDeclaration(state.contextualKeyword, /* isBeforeToken */ true);
+}
+
+// Returns true if it matched a statement.
+function tsParseExpressionStatement(contextualKeyword) {
+ switch (contextualKeyword) {
+ case ContextualKeyword._declare: {
+ const declareTokenIndex = state.tokens.length - 1;
+ const matched = tsTryParseDeclare();
+ if (matched) {
+ state.tokens[declareTokenIndex].type = tt._declare;
+ return true;
+ }
+ break;
+ }
+ case ContextualKeyword._global:
+ // `global { }` (with no `declare`) may appear inside an ambient module declaration.
+ // We would like to use tsParseAmbientExternalModuleDeclaration here, but we already ran past "global".
+ if (match(tt.braceL)) {
+ tsParseModuleBlock();
+ return true;
+ }
+ break;
+
+ default:
+ return tsParseDeclaration(contextualKeyword, /* isBeforeToken */ false);
+ }
+ return false;
+}
+
+/**
+ * Common code for parsing a declaration.
+ *
+ * isBeforeToken indicates that the current parser state is at the contextual
+ * keyword (and that it is not yet emitted) rather than reading the token after
+ * it. When isBeforeToken is true, we may be preceded by an `export` token and
+ * should include that token in a type context we create, e.g. to handle
+ * `export interface` or `export type`. (This is a bit of a hack and should be
+ * cleaned up at some point.)
+ *
+ * Returns true if it matched a declaration.
+ */
+function tsParseDeclaration(contextualKeyword, isBeforeToken) {
+ switch (contextualKeyword) {
+ case ContextualKeyword._abstract:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt._class)) {
+ state.tokens[state.tokens.length - 1].type = tt._abstract;
+ parseClass(/* isStatement */ true, /* optionalId */ false);
+ return true;
+ }
+ break;
+
+ case ContextualKeyword._enum:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt.name)) {
+ state.tokens[state.tokens.length - 1].type = tt._enum;
+ tsParseEnumDeclaration();
+ return true;
+ }
+ break;
+
+ case ContextualKeyword._interface:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt.name)) {
+ // `isBeforeToken` is true in "export" and "declare" contexts, so we push the type
+ // context back one extra token in order to remove that preceding token as well.
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseInterfaceDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ break;
+
+ case ContextualKeyword._module:
+ if (tsCheckLineTerminator(isBeforeToken)) {
+ if (match(tt.string)) {
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseAmbientExternalModuleDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ } else if (match(tt.name)) {
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseModuleOrNamespaceDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ }
+ break;
+
+ case ContextualKeyword._namespace:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt.name)) {
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseModuleOrNamespaceDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ break;
+
+ case ContextualKeyword._type:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt.name)) {
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseTypeAliasDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return false;
+}
+
+function tsCheckLineTerminator(isBeforeToken) {
+ if (isBeforeToken) {
+ // Babel checks hasFollowingLineBreak here and returns false, but this
+ // doesn't actually come up, e.g. `export interface` can never be on its own
+ // line in valid code.
+ next();
+ return true;
+ } else {
+ return !isLineTerminator();
+ }
+}
+
+// Returns true if there was a generic async arrow function.
+function tsTryParseGenericAsyncArrowFunction() {
+ const snapshot = state.snapshot();
+
+ tsParseTypeParameters();
+ parseFunctionParams();
+ tsTryParseTypeOrTypePredicateAnnotation();
+ expect(tt.arrow);
+
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ }
+
+ parseFunctionBody(true);
+ return true;
+}
+
+/**
+ * If necessary, hack the tokenizer state so that this bitshift was actually a
+ * less-than token, then keep parsing. This should only be used in situations
+ * where we restore from snapshot on error (which reverts this change) or
+ * where bitshift would be illegal anyway (e.g. in a class "extends" clause).
+ *
+ * This hack is useful to handle situations like foo<<T>() => void>() where
+ * there can legitimately be two open-angle-brackets in a row in TS.
+ */
+function tsParseTypeArgumentsWithPossibleBitshift() {
+ if (state.type === tt.bitShiftL) {
+ state.pos -= 1;
+ finishToken(tt.lessThan);
+ }
+ tsParseTypeArguments();
+}
+
+function tsParseTypeArguments() {
+ const oldIsType = pushTypeContext(0);
+ expect(tt.lessThan);
+ while (!match(tt.greaterThan) && !state.error) {
+ tsParseType();
+ eat(tt.comma);
+ }
+ if (!oldIsType) {
+ // If the type arguments are present in an expression context, e.g.
+ // f<number>(), then the > sign should be tokenized as a non-type token.
+ // In particular, f(a < b, c >= d) should parse the >= as a single token,
+ // resulting in a syntax error and fallback to the non-type-args
+ // interpretation. In the success case, even though the > is tokenized as a
+ // non-type token, it still must be marked as a type token so that it is
+ // erased.
+ popTypeContext(oldIsType);
+ rescan_gt();
+ expect(tt.greaterThan);
+ state.tokens[state.tokens.length - 1].isType = true;
+ } else {
+ expect(tt.greaterThan);
+ popTypeContext(oldIsType);
+ }
+}
+
+export function tsIsDeclarationStart() {
+ if (match(tt.name)) {
+ switch (state.contextualKeyword) {
+ case ContextualKeyword._abstract:
+ case ContextualKeyword._declare:
+ case ContextualKeyword._enum:
+ case ContextualKeyword._interface:
+ case ContextualKeyword._module:
+ case ContextualKeyword._namespace:
+ case ContextualKeyword._type:
+ return true;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
+// ======================================================
+// OVERRIDES
+// ======================================================
+
+export function tsParseFunctionBodyAndFinish(functionStart, funcContextId) {
+ // For arrow functions, `parseArrow` handles the return type itself.
+ if (match(tt.colon)) {
+ tsParseTypeOrTypePredicateAnnotation(tt.colon);
+ }
+
+ // The original code checked the node type to make sure this function type allows a missing
+ // body, but we skip that to avoid sending around the node type. We instead just use the
+ // allowExpressionBody boolean to make sure it's not an arrow function.
+ if (!match(tt.braceL) && isLineTerminator()) {
+ // Retroactively mark the function declaration as a type.
+ let i = state.tokens.length - 1;
+ while (
+ i >= 0 &&
+ (state.tokens[i].start >= functionStart ||
+ state.tokens[i].type === tt._default ||
+ state.tokens[i].type === tt._export)
+ ) {
+ state.tokens[i].isType = true;
+ i--;
+ }
+ return;
+ }
+
+ parseFunctionBody(false, funcContextId);
+}
+
+export function tsParseSubscript(
+ startTokenIndex,
+ noCalls,
+ stopState,
+) {
+ if (!hasPrecedingLineBreak() && eat(tt.bang)) {
+ state.tokens[state.tokens.length - 1].type = tt.nonNullAssertion;
+ return;
+ }
+
+ if (match(tt.lessThan) || match(tt.bitShiftL)) {
+ // There are a number of things we are going to "maybe" parse, like type arguments on
+ // tagged template expressions. If any of them fail, walk it back and continue.
+ const snapshot = state.snapshot();
+
+ if (!noCalls && atPossibleAsync()) {
+ // Almost certainly this is a generic async function `async <T>() => ...`.
+ // But it might be a call with a type argument `async<T>();`
+ const asyncArrowFn = tsTryParseGenericAsyncArrowFunction();
+ if (asyncArrowFn) {
+ return;
+ }
+ }
+ tsParseTypeArgumentsWithPossibleBitshift();
+ if (!noCalls && eat(tt.parenL)) {
+ // With f<T>(), the subscriptStartIndex marker is on the ( token.
+ state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
+ parseCallExpressionArguments();
+ } else if (match(tt.backQuote)) {
+ // Tagged template with a type argument.
+ parseTemplate();
+ } else if (
+ // The remaining possible case is an instantiation expression, e.g.
+ // Array<number> . Check for a few cases that would disqualify it and
+ // cause us to bail out.
+ // a<b>>c is not (a<b>)>c, but a<(b>>c)
+ state.type === tt.greaterThan ||
+ // a<b>c is (a<b)>c
+ (state.type !== tt.parenL &&
+ Boolean(state.type & TokenType.IS_EXPRESSION_START) &&
+ !hasPrecedingLineBreak())
+ ) {
+ // Bail out. We have something like a<b>c, which is not an expression with
+ // type arguments but an (a < b) > c comparison.
+ unexpected();
+ }
+
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ } else {
+ return;
+ }
+ } else if (!noCalls && match(tt.questionDot) && lookaheadType() === tt.lessThan) {
+ // If we see f?.<, then this must be an optional call with a type argument.
+ next();
+ state.tokens[startTokenIndex].isOptionalChainStart = true;
+ // With f?.<T>(), the subscriptStartIndex marker is on the ?. token.
+ state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
+
+ tsParseTypeArguments();
+ expect(tt.parenL);
+ parseCallExpressionArguments();
+ }
+ baseParseSubscript(startTokenIndex, noCalls, stopState);
+}
+
+export function tsTryParseExport() {
+ if (eat(tt._import)) {
+ // One of these cases:
+ // export import A = B;
+ // export import type A = require("A");
+ if (isContextual(ContextualKeyword._type) && lookaheadType() !== tt.eq) {
+ // Eat a `type` token, unless it's actually an identifier name.
+ expectContextual(ContextualKeyword._type);
+ }
+ tsParseImportEqualsDeclaration();
+ return true;
+ } else if (eat(tt.eq)) {
+ // `export = x;`
+ parseExpression();
+ semicolon();
+ return true;
+ } else if (eatContextual(ContextualKeyword._as)) {
+ // `export as namespace A;`
+ // See `parseNamespaceExportDeclaration` in TypeScript's own parser
+ expectContextual(ContextualKeyword._namespace);
+ parseIdentifier();
+ semicolon();
+ return true;
+ } else {
+ if (isContextual(ContextualKeyword._type)) {
+ const nextType = lookaheadType();
+ // export type {foo} from 'a';
+ // export type * from 'a';
+ // export type * as ns from 'a';
+ if (nextType === tt.braceL || nextType === tt.star) {
+ next();
+ }
+ }
+ return false;
+ }
+}
+
+/**
+ * Parse a TS import specifier, which may be prefixed with "type" and may be of
+ * the form `foo as bar`.
+ *
+ * The number of identifier-like tokens we see happens to be enough to uniquely
+ * identify the form, so simply count the number of identifiers rather than
+ * matching the words `type` or `as`. This is particularly important because
+ * `type` and `as` could each actually be plain identifiers rather than
+ * keywords.
+ */
+export function tsParseImportSpecifier() {
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // import {foo}
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportDeclaration;
+ return;
+ }
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // import {type foo}
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportDeclaration;
+ state.tokens[state.tokens.length - 2].isType = true;
+ state.tokens[state.tokens.length - 1].isType = true;
+ return;
+ }
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // import {foo as bar}
+ state.tokens[state.tokens.length - 3].identifierRole = IdentifierRole.ImportAccess;
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportDeclaration;
+ return;
+ }
+ parseIdentifier();
+ // import {type foo as bar}
+ state.tokens[state.tokens.length - 3].identifierRole = IdentifierRole.ImportAccess;
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportDeclaration;
+ state.tokens[state.tokens.length - 4].isType = true;
+ state.tokens[state.tokens.length - 3].isType = true;
+ state.tokens[state.tokens.length - 2].isType = true;
+ state.tokens[state.tokens.length - 1].isType = true;
+}
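+// For reference, all four import specifier forms described above can appear in one
+// clause (illustrative example with hypothetical names):
+//   import {foo, type Bar, baz as qux, type Quux as Corge} from "./mod";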
+
+/**
+ * Just like named import specifiers, export specifiers can have from 1 to 4
+ * tokens, inclusive, and the number of tokens determines the role of each token.
+ */
+export function tsParseExportSpecifier() {
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // export {foo}
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ExportAccess;
+ return;
+ }
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // export {type foo}
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ExportAccess;
+ state.tokens[state.tokens.length - 2].isType = true;
+ state.tokens[state.tokens.length - 1].isType = true;
+ return;
+ }
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // export {foo as bar}
+ state.tokens[state.tokens.length - 3].identifierRole = IdentifierRole.ExportAccess;
+ return;
+ }
+ parseIdentifier();
+ // export {type foo as bar}
+ state.tokens[state.tokens.length - 3].identifierRole = IdentifierRole.ExportAccess;
+ state.tokens[state.tokens.length - 4].isType = true;
+ state.tokens[state.tokens.length - 3].isType = true;
+ state.tokens[state.tokens.length - 2].isType = true;
+ state.tokens[state.tokens.length - 1].isType = true;
+}
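+// For reference, the matching export specifier forms (illustrative example with
+// hypothetical names):
+//   export {foo, type Bar, baz as qux, type Quux as Corge};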
+
+export function tsTryParseExportDefaultExpression() {
+ if (isContextual(ContextualKeyword._abstract) && lookaheadType() === tt._class) {
+ state.type = tt._abstract;
+ next(); // Skip "abstract"
+ parseClass(true, true);
+ return true;
+ }
+ if (isContextual(ContextualKeyword._interface)) {
+ // Make sure the "export default" tokens are considered type tokens so the whole thing is removed.
+ const oldIsType = pushTypeContext(2);
+ tsParseDeclaration(ContextualKeyword._interface, true);
+ popTypeContext(oldIsType);
+ return true;
+ }
+ return false;
+}
+
+export function tsTryParseStatementContent() {
+ if (state.type === tt._const) {
+ const ahead = lookaheadTypeAndKeyword();
+ if (ahead.type === tt.name && ahead.contextualKeyword === ContextualKeyword._enum) {
+ expect(tt._const);
+ expectContextual(ContextualKeyword._enum);
+ state.tokens[state.tokens.length - 1].type = tt._enum;
+ tsParseEnumDeclaration();
+ return true;
+ }
+ }
+ return false;
+}
+
+export function tsTryParseClassMemberWithIsStatic(isStatic) {
+ const memberStartIndexAfterStatic = state.tokens.length;
+ tsParseModifiers([
+ ContextualKeyword._abstract,
+ ContextualKeyword._readonly,
+ ContextualKeyword._declare,
+ ContextualKeyword._static,
+ ContextualKeyword._override,
+ ]);
+
+ const modifiersEndIndex = state.tokens.length;
+ const found = tsTryParseIndexSignature();
+ if (found) {
+ // Index signatures are type declarations, so set the modifier tokens as
+ // type tokens. Most tokens could be assumed to be type tokens, but `static`
+ // is ambiguous unless we set it explicitly here.
+ const memberStartIndex = isStatic
+ ? memberStartIndexAfterStatic - 1
+ : memberStartIndexAfterStatic;
+ for (let i = memberStartIndex; i < modifiersEndIndex; i++) {
+ state.tokens[i].isType = true;
+ }
+ return true;
+ }
+ return false;
+}
+
+// Note: The reason we do this in `parseIdentifierStatement` and not `parseStatement`
+// is that e.g. `type()` is valid JS, so we must try parsing it as an expression first.
+// If it really is a type declaration, we will have parsed `type` as the statement,
+// and we correct for that here by parsing the rest of the declaration.
+export function tsParseIdentifierStatement(contextualKeyword) {
+ const matched = tsParseExpressionStatement(contextualKeyword);
+ if (!matched) {
+ semicolon();
+ }
+}
+
+export function tsParseExportDeclaration() {
+ // "export declare" is equivalent to just "export".
+ const isDeclare = eatContextual(ContextualKeyword._declare);
+ if (isDeclare) {
+ state.tokens[state.tokens.length - 1].type = tt._declare;
+ }
+
+ let matchedDeclaration = false;
+ if (match(tt.name)) {
+ if (isDeclare) {
+ const oldIsType = pushTypeContext(2);
+ matchedDeclaration = tsTryParseExportDeclaration();
+ popTypeContext(oldIsType);
+ } else {
+ matchedDeclaration = tsTryParseExportDeclaration();
+ }
+ }
+ if (!matchedDeclaration) {
+ if (isDeclare) {
+ const oldIsType = pushTypeContext(2);
+ parseStatement(true);
+ popTypeContext(oldIsType);
+ } else {
+ parseStatement(true);
+ }
+ }
+}
+
+export function tsAfterParseClassSuper(hasSuper) {
+ if (hasSuper && (match(tt.lessThan) || match(tt.bitShiftL))) {
+ tsParseTypeArgumentsWithPossibleBitshift();
+ }
+ if (eatContextual(ContextualKeyword._implements)) {
+ state.tokens[state.tokens.length - 1].type = tt._implements;
+ const oldIsType = pushTypeContext(1);
+ tsParseHeritageClause();
+ popTypeContext(oldIsType);
+ }
+}
+
+export function tsStartParseObjPropValue() {
+ tsTryParseTypeParameters();
+}
+
+export function tsStartParseFunctionParams() {
+ tsTryParseTypeParameters();
+}
+
+// `let x: number;`
+export function tsAfterParseVarHead() {
+ const oldIsType = pushTypeContext(0);
+ if (!hasPrecedingLineBreak()) {
+ eat(tt.bang);
+ }
+ tsTryParseTypeAnnotation();
+ popTypeContext(oldIsType);
+}
+
+// parse the return type of an async arrow function - let foo = (async (): number => {});
+export function tsStartParseAsyncArrowFromCallExpression() {
+ if (match(tt.colon)) {
+ tsParseTypeAnnotation();
+ }
+}
+
+// Returns true if the expression was an arrow function.
+export function tsParseMaybeAssign(noIn, isWithinParens) {
+ // Note: When the JSX plugin is on, type assertions (`<T> x`) aren't valid syntax.
+ if (isJSXEnabled) {
+ return tsParseMaybeAssignWithJSX(noIn, isWithinParens);
+ } else {
+ return tsParseMaybeAssignWithoutJSX(noIn, isWithinParens);
+ }
+}
+
+export function tsParseMaybeAssignWithJSX(noIn, isWithinParens) {
+ if (!match(tt.lessThan)) {
+ return baseParseMaybeAssign(noIn, isWithinParens);
+ }
+
+ // Prefer to parse JSX if possible, but this may be an arrow function.
+ const snapshot = state.snapshot();
+ let wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ } else {
+ return wasArrow;
+ }
+
+ // Otherwise, try as type-parameterized arrow function.
+ state.type = tt.typeParameterStart;
+ // This is similar to TypeScript's `tryParseParenthesizedArrowFunctionExpression`.
+ tsParseTypeParameters();
+ wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (!wasArrow) {
+ unexpected();
+ }
+
+ return wasArrow;
+}
+
+export function tsParseMaybeAssignWithoutJSX(noIn, isWithinParens) {
+ if (!match(tt.lessThan)) {
+ return baseParseMaybeAssign(noIn, isWithinParens);
+ }
+
+ const snapshot = state.snapshot();
+ // This is similar to TypeScript's `tryParseParenthesizedArrowFunctionExpression`.
+ tsParseTypeParameters();
+ const wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (!wasArrow) {
+ unexpected();
+ }
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ } else {
+ return wasArrow;
+ }
+
+ // Try parsing a type cast instead of an arrow function.
+ // This will start with a type assertion (via parseMaybeUnary).
+ // But don't directly call `tsParseTypeAssertion` because we want to handle any binary after it.
+ return baseParseMaybeAssign(noIn, isWithinParens);
+}
+
+export function tsParseArrow() {
+ if (match(tt.colon)) {
+ // This is different from how the TS parser does it.
+ // TS uses lookahead. Babylon parses it as a parenthesized expression and converts.
+ const snapshot = state.snapshot();
+
+ tsParseTypeOrTypePredicateAnnotation(tt.colon);
+ if (canInsertSemicolon()) unexpected();
+ if (!match(tt.arrow)) unexpected();
+
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ }
+ }
+ return eat(tt.arrow);
+}
+
+// Allow type annotations inside of a parameter list.
+export function tsParseAssignableListItemTypes() {
+ const oldIsType = pushTypeContext(0);
+ eat(tt.question);
+ tsTryParseTypeAnnotation();
+ popTypeContext(oldIsType);
+}
+
+export function tsParseMaybeDecoratorArguments() {
+ if (match(tt.lessThan) || match(tt.bitShiftL)) {
+ tsParseTypeArgumentsWithPossibleBitshift();
+ }
+ baseParseMaybeDecoratorArguments();
+}