summaryrefslogtreecommitdiff
path: root/node_modules/sucrase/dist/esm
diff options
context:
space:
mode:
authorPhilipp Tanlak <philipp.tanlak@gmail.com>2025-11-24 20:54:57 +0100
committerPhilipp Tanlak <philipp.tanlak@gmail.com>2025-11-24 20:57:48 +0100
commitb1e2c8fd5cb5dfa46bc440a12eafaf56cd844b1c (patch)
tree49d360fd6cbc6a2754efe93524ac47ff0fbe0f7d /node_modules/sucrase/dist/esm
Docs
Diffstat (limited to 'node_modules/sucrase/dist/esm')
-rw-r--r--node_modules/sucrase/dist/esm/CJSImportProcessor.js456
-rw-r--r--node_modules/sucrase/dist/esm/HelperManager.js176
-rw-r--r--node_modules/sucrase/dist/esm/NameManager.js27
-rw-r--r--node_modules/sucrase/dist/esm/Options-gen-types.js42
-rw-r--r--node_modules/sucrase/dist/esm/Options.js101
-rw-r--r--node_modules/sucrase/dist/esm/TokenProcessor.js357
-rw-r--r--node_modules/sucrase/dist/esm/cli.js320
-rw-r--r--node_modules/sucrase/dist/esm/computeSourceMap.js89
-rw-r--r--node_modules/sucrase/dist/esm/identifyShadowedGlobals.js98
-rw-r--r--node_modules/sucrase/dist/esm/index.js133
-rw-r--r--node_modules/sucrase/dist/esm/parser/index.js31
-rw-r--r--node_modules/sucrase/dist/esm/parser/plugins/flow.js1105
-rw-r--r--node_modules/sucrase/dist/esm/parser/plugins/jsx/index.js367
-rw-r--r--node_modules/sucrase/dist/esm/parser/plugins/jsx/xhtml.js256
-rw-r--r--node_modules/sucrase/dist/esm/parser/plugins/types.js37
-rw-r--r--node_modules/sucrase/dist/esm/parser/plugins/typescript.js1632
-rw-r--r--node_modules/sucrase/dist/esm/parser/tokenizer/index.js1004
-rw-r--r--node_modules/sucrase/dist/esm/parser/tokenizer/keywords.js43
-rw-r--r--node_modules/sucrase/dist/esm/parser/tokenizer/readWord.js64
-rw-r--r--node_modules/sucrase/dist/esm/parser/tokenizer/readWordTree.js671
-rw-r--r--node_modules/sucrase/dist/esm/parser/tokenizer/state.js106
-rw-r--r--node_modules/sucrase/dist/esm/parser/tokenizer/types.js361
-rw-r--r--node_modules/sucrase/dist/esm/parser/traverser/base.js60
-rw-r--r--node_modules/sucrase/dist/esm/parser/traverser/expression.js1022
-rw-r--r--node_modules/sucrase/dist/esm/parser/traverser/index.js18
-rw-r--r--node_modules/sucrase/dist/esm/parser/traverser/lval.js159
-rw-r--r--node_modules/sucrase/dist/esm/parser/traverser/statement.js1332
-rw-r--r--node_modules/sucrase/dist/esm/parser/traverser/util.js104
-rw-r--r--node_modules/sucrase/dist/esm/parser/util/charcodes.js115
-rw-r--r--node_modules/sucrase/dist/esm/parser/util/identifier.js34
-rw-r--r--node_modules/sucrase/dist/esm/parser/util/whitespace.js33
-rw-r--r--node_modules/sucrase/dist/esm/register.js88
-rw-r--r--node_modules/sucrase/dist/esm/transformers/CJSImportTransformer.js916
-rw-r--r--node_modules/sucrase/dist/esm/transformers/ESMImportTransformer.js415
-rw-r--r--node_modules/sucrase/dist/esm/transformers/FlowTransformer.js182
-rw-r--r--node_modules/sucrase/dist/esm/transformers/JSXTransformer.js733
-rw-r--r--node_modules/sucrase/dist/esm/transformers/JestHoistTransformer.js111
-rw-r--r--node_modules/sucrase/dist/esm/transformers/NumericSeparatorTransformer.js20
-rw-r--r--node_modules/sucrase/dist/esm/transformers/OptionalCatchBindingTransformer.js19
-rw-r--r--node_modules/sucrase/dist/esm/transformers/OptionalChainingNullishTransformer.js155
-rw-r--r--node_modules/sucrase/dist/esm/transformers/ReactDisplayNameTransformer.js160
-rw-r--r--node_modules/sucrase/dist/esm/transformers/ReactHotLoaderTransformer.js69
-rw-r--r--node_modules/sucrase/dist/esm/transformers/RootTransformer.js462
-rw-r--r--node_modules/sucrase/dist/esm/transformers/Transformer.js16
-rw-r--r--node_modules/sucrase/dist/esm/transformers/TypeScriptTransformer.js279
-rw-r--r--node_modules/sucrase/dist/esm/util/elideImportEquals.js29
-rw-r--r--node_modules/sucrase/dist/esm/util/formatTokens.js74
-rw-r--r--node_modules/sucrase/dist/esm/util/getClassInfo.js352
-rw-r--r--node_modules/sucrase/dist/esm/util/getDeclarationInfo.js40
-rw-r--r--node_modules/sucrase/dist/esm/util/getIdentifierNames.js15
-rw-r--r--node_modules/sucrase/dist/esm/util/getImportExportSpecifierInfo.js92
-rw-r--r--node_modules/sucrase/dist/esm/util/getJSXPragmaInfo.js22
-rw-r--r--node_modules/sucrase/dist/esm/util/getNonTypeIdentifiers.js43
-rw-r--r--node_modules/sucrase/dist/esm/util/getTSImportedNames.js84
-rw-r--r--node_modules/sucrase/dist/esm/util/isAsyncOperation.js38
-rw-r--r--node_modules/sucrase/dist/esm/util/isExportFrom.js18
-rw-r--r--node_modules/sucrase/dist/esm/util/isIdentifier.js81
-rw-r--r--node_modules/sucrase/dist/esm/util/removeMaybeImportAttributes.js22
-rw-r--r--node_modules/sucrase/dist/esm/util/shouldElideDefaultExport.js38
59 files changed, 14926 insertions, 0 deletions
diff --git a/node_modules/sucrase/dist/esm/CJSImportProcessor.js b/node_modules/sucrase/dist/esm/CJSImportProcessor.js
new file mode 100644
index 0000000..d8b7803
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/CJSImportProcessor.js
@@ -0,0 +1,456 @@
+
+
+
+import {isDeclaration} from "./parser/tokenizer";
+import {ContextualKeyword} from "./parser/tokenizer/keywords";
+import {TokenType as tt} from "./parser/tokenizer/types";
+
+import getImportExportSpecifierInfo from "./util/getImportExportSpecifierInfo";
+import {getNonTypeIdentifiers} from "./util/getNonTypeIdentifiers";
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * Class responsible for preprocessing and bookkeeping import and export declarations within the
+ * file.
+ *
+ * TypeScript uses a simpler mechanism that does not use functions like interopRequireDefault and
+ * interopRequireWildcard, so we also allow that mode for compatibility.
+ */
+export default class CJSImportProcessor {
+ __init() {this.nonTypeIdentifiers = new Set()}
+ __init2() {this.importInfoByPath = new Map()}
+ __init3() {this.importsToReplace = new Map()}
+ __init4() {this.identifierReplacements = new Map()}
+ __init5() {this.exportBindingsByLocalName = new Map()}
+
+ constructor(
+ nameManager,
+ tokens,
+ enableLegacyTypeScriptModuleInterop,
+ options,
+ isTypeScriptTransformEnabled,
+ keepUnusedImports,
+ helperManager,
+ ) {;this.nameManager = nameManager;this.tokens = tokens;this.enableLegacyTypeScriptModuleInterop = enableLegacyTypeScriptModuleInterop;this.options = options;this.isTypeScriptTransformEnabled = isTypeScriptTransformEnabled;this.keepUnusedImports = keepUnusedImports;this.helperManager = helperManager;CJSImportProcessor.prototype.__init.call(this);CJSImportProcessor.prototype.__init2.call(this);CJSImportProcessor.prototype.__init3.call(this);CJSImportProcessor.prototype.__init4.call(this);CJSImportProcessor.prototype.__init5.call(this);}
+
+ preprocessTokens() {
+ for (let i = 0; i < this.tokens.tokens.length; i++) {
+ if (
+ this.tokens.matches1AtIndex(i, tt._import) &&
+ !this.tokens.matches3AtIndex(i, tt._import, tt.name, tt.eq)
+ ) {
+ this.preprocessImportAtIndex(i);
+ }
+ if (
+ this.tokens.matches1AtIndex(i, tt._export) &&
+ !this.tokens.matches2AtIndex(i, tt._export, tt.eq)
+ ) {
+ this.preprocessExportAtIndex(i);
+ }
+ }
+ this.generateImportReplacements();
+ }
+
+ /**
+ * In TypeScript, import statements that only import types should be removed.
+ * This includes `import {} from 'foo';`, but not `import 'foo';`.
+ */
+ pruneTypeOnlyImports() {
+ this.nonTypeIdentifiers = getNonTypeIdentifiers(this.tokens, this.options);
+ for (const [path, importInfo] of this.importInfoByPath.entries()) {
+ if (
+ importInfo.hasBareImport ||
+ importInfo.hasStarExport ||
+ importInfo.exportStarNames.length > 0 ||
+ importInfo.namedExports.length > 0
+ ) {
+ continue;
+ }
+ const names = [
+ ...importInfo.defaultNames,
+ ...importInfo.wildcardNames,
+ ...importInfo.namedImports.map(({localName}) => localName),
+ ];
+ if (names.every((name) => this.shouldAutomaticallyElideImportedName(name))) {
+ this.importsToReplace.set(path, "");
+ }
+ }
+ }
+
+ shouldAutomaticallyElideImportedName(name) {
+ return (
+ this.isTypeScriptTransformEnabled &&
+ !this.keepUnusedImports &&
+ !this.nonTypeIdentifiers.has(name)
+ );
+ }
+
+ generateImportReplacements() {
+ for (const [path, importInfo] of this.importInfoByPath.entries()) {
+ const {
+ defaultNames,
+ wildcardNames,
+ namedImports,
+ namedExports,
+ exportStarNames,
+ hasStarExport,
+ } = importInfo;
+
+ if (
+ defaultNames.length === 0 &&
+ wildcardNames.length === 0 &&
+ namedImports.length === 0 &&
+ namedExports.length === 0 &&
+ exportStarNames.length === 0 &&
+ !hasStarExport
+ ) {
+ // Import is never used, so don't even assign a name.
+ this.importsToReplace.set(path, `require('${path}');`);
+ continue;
+ }
+
+ const primaryImportName = this.getFreeIdentifierForPath(path);
+ let secondaryImportName;
+ if (this.enableLegacyTypeScriptModuleInterop) {
+ secondaryImportName = primaryImportName;
+ } else {
+ secondaryImportName =
+ wildcardNames.length > 0 ? wildcardNames[0] : this.getFreeIdentifierForPath(path);
+ }
+ let requireCode = `var ${primaryImportName} = require('${path}');`;
+ if (wildcardNames.length > 0) {
+ for (const wildcardName of wildcardNames) {
+ const moduleExpr = this.enableLegacyTypeScriptModuleInterop
+ ? primaryImportName
+ : `${this.helperManager.getHelperName("interopRequireWildcard")}(${primaryImportName})`;
+ requireCode += ` var ${wildcardName} = ${moduleExpr};`;
+ }
+ } else if (exportStarNames.length > 0 && secondaryImportName !== primaryImportName) {
+ requireCode += ` var ${secondaryImportName} = ${this.helperManager.getHelperName(
+ "interopRequireWildcard",
+ )}(${primaryImportName});`;
+ } else if (defaultNames.length > 0 && secondaryImportName !== primaryImportName) {
+ requireCode += ` var ${secondaryImportName} = ${this.helperManager.getHelperName(
+ "interopRequireDefault",
+ )}(${primaryImportName});`;
+ }
+
+ for (const {importedName, localName} of namedExports) {
+ requireCode += ` ${this.helperManager.getHelperName(
+ "createNamedExportFrom",
+ )}(${primaryImportName}, '${localName}', '${importedName}');`;
+ }
+ for (const exportStarName of exportStarNames) {
+ requireCode += ` exports.${exportStarName} = ${secondaryImportName};`;
+ }
+ if (hasStarExport) {
+ requireCode += ` ${this.helperManager.getHelperName(
+ "createStarExport",
+ )}(${primaryImportName});`;
+ }
+
+ this.importsToReplace.set(path, requireCode);
+
+ for (const defaultName of defaultNames) {
+ this.identifierReplacements.set(defaultName, `${secondaryImportName}.default`);
+ }
+ for (const {importedName, localName} of namedImports) {
+ this.identifierReplacements.set(localName, `${primaryImportName}.${importedName}`);
+ }
+ }
+ }
+
+ getFreeIdentifierForPath(path) {
+ const components = path.split("/");
+ const lastComponent = components[components.length - 1];
+ const baseName = lastComponent.replace(/\W/g, "");
+ return this.nameManager.claimFreeName(`_${baseName}`);
+ }
+
+ preprocessImportAtIndex(index) {
+ const defaultNames = [];
+ const wildcardNames = [];
+ const namedImports = [];
+
+ index++;
+ if (
+ (this.tokens.matchesContextualAtIndex(index, ContextualKeyword._type) ||
+ this.tokens.matches1AtIndex(index, tt._typeof)) &&
+ !this.tokens.matches1AtIndex(index + 1, tt.comma) &&
+ !this.tokens.matchesContextualAtIndex(index + 1, ContextualKeyword._from)
+ ) {
+ // import type declaration, so no need to process anything.
+ return;
+ }
+
+ if (this.tokens.matches1AtIndex(index, tt.parenL)) {
+ // Dynamic import, so nothing to do
+ return;
+ }
+
+ if (this.tokens.matches1AtIndex(index, tt.name)) {
+ defaultNames.push(this.tokens.identifierNameAtIndex(index));
+ index++;
+ if (this.tokens.matches1AtIndex(index, tt.comma)) {
+ index++;
+ }
+ }
+
+ if (this.tokens.matches1AtIndex(index, tt.star)) {
+ // * as
+ index += 2;
+ wildcardNames.push(this.tokens.identifierNameAtIndex(index));
+ index++;
+ }
+
+ if (this.tokens.matches1AtIndex(index, tt.braceL)) {
+ const result = this.getNamedImports(index + 1);
+ index = result.newIndex;
+
+ for (const namedImport of result.namedImports) {
+ // Treat {default as X} as a default import to ensure usage of require interop helper
+ if (namedImport.importedName === "default") {
+ defaultNames.push(namedImport.localName);
+ } else {
+ namedImports.push(namedImport);
+ }
+ }
+ }
+
+ if (this.tokens.matchesContextualAtIndex(index, ContextualKeyword._from)) {
+ index++;
+ }
+
+ if (!this.tokens.matches1AtIndex(index, tt.string)) {
+ throw new Error("Expected string token at the end of import statement.");
+ }
+ const path = this.tokens.stringValueAtIndex(index);
+ const importInfo = this.getImportInfo(path);
+ importInfo.defaultNames.push(...defaultNames);
+ importInfo.wildcardNames.push(...wildcardNames);
+ importInfo.namedImports.push(...namedImports);
+ if (defaultNames.length === 0 && wildcardNames.length === 0 && namedImports.length === 0) {
+ importInfo.hasBareImport = true;
+ }
+ }
+
+ preprocessExportAtIndex(index) {
+ if (
+ this.tokens.matches2AtIndex(index, tt._export, tt._var) ||
+ this.tokens.matches2AtIndex(index, tt._export, tt._let) ||
+ this.tokens.matches2AtIndex(index, tt._export, tt._const)
+ ) {
+ this.preprocessVarExportAtIndex(index);
+ } else if (
+ this.tokens.matches2AtIndex(index, tt._export, tt._function) ||
+ this.tokens.matches2AtIndex(index, tt._export, tt._class)
+ ) {
+ const exportName = this.tokens.identifierNameAtIndex(index + 2);
+ this.addExportBinding(exportName, exportName);
+ } else if (this.tokens.matches3AtIndex(index, tt._export, tt.name, tt._function)) {
+ const exportName = this.tokens.identifierNameAtIndex(index + 3);
+ this.addExportBinding(exportName, exportName);
+ } else if (this.tokens.matches2AtIndex(index, tt._export, tt.braceL)) {
+ this.preprocessNamedExportAtIndex(index);
+ } else if (this.tokens.matches2AtIndex(index, tt._export, tt.star)) {
+ this.preprocessExportStarAtIndex(index);
+ }
+ }
+
+ preprocessVarExportAtIndex(index) {
+ let depth = 0;
+ // Handle cases like `export let {x} = y;`, starting at the open-brace in that case.
+ for (let i = index + 2; ; i++) {
+ if (
+ this.tokens.matches1AtIndex(i, tt.braceL) ||
+ this.tokens.matches1AtIndex(i, tt.dollarBraceL) ||
+ this.tokens.matches1AtIndex(i, tt.bracketL)
+ ) {
+ depth++;
+ } else if (
+ this.tokens.matches1AtIndex(i, tt.braceR) ||
+ this.tokens.matches1AtIndex(i, tt.bracketR)
+ ) {
+ depth--;
+ } else if (depth === 0 && !this.tokens.matches1AtIndex(i, tt.name)) {
+ break;
+ } else if (this.tokens.matches1AtIndex(1, tt.eq)) {
+ const endIndex = this.tokens.currentToken().rhsEndIndex;
+ if (endIndex == null) {
+ throw new Error("Expected = token with an end index.");
+ }
+ i = endIndex - 1;
+ } else {
+ const token = this.tokens.tokens[i];
+ if (isDeclaration(token)) {
+ const exportName = this.tokens.identifierNameAtIndex(i);
+ this.identifierReplacements.set(exportName, `exports.${exportName}`);
+ }
+ }
+ }
+ }
+
+ /**
+ * Walk this export statement just in case it's an export...from statement.
+ * If it is, combine it into the import info for that path. Otherwise, just
+ * bail out; it'll be handled later.
+ */
+ preprocessNamedExportAtIndex(index) {
+ // export {
+ index += 2;
+ const {newIndex, namedImports} = this.getNamedImports(index);
+ index = newIndex;
+
+ if (this.tokens.matchesContextualAtIndex(index, ContextualKeyword._from)) {
+ index++;
+ } else {
+ // Reinterpret "a as b" to be local/exported rather than imported/local.
+ for (const {importedName: localName, localName: exportedName} of namedImports) {
+ this.addExportBinding(localName, exportedName);
+ }
+ return;
+ }
+
+ if (!this.tokens.matches1AtIndex(index, tt.string)) {
+ throw new Error("Expected string token at the end of import statement.");
+ }
+ const path = this.tokens.stringValueAtIndex(index);
+ const importInfo = this.getImportInfo(path);
+ importInfo.namedExports.push(...namedImports);
+ }
+
+ preprocessExportStarAtIndex(index) {
+ let exportedName = null;
+ if (this.tokens.matches3AtIndex(index, tt._export, tt.star, tt._as)) {
+ // export * as
+ index += 3;
+ exportedName = this.tokens.identifierNameAtIndex(index);
+ // foo from
+ index += 2;
+ } else {
+ // export * from
+ index += 3;
+ }
+ if (!this.tokens.matches1AtIndex(index, tt.string)) {
+ throw new Error("Expected string token at the end of star export statement.");
+ }
+ const path = this.tokens.stringValueAtIndex(index);
+ const importInfo = this.getImportInfo(path);
+ if (exportedName !== null) {
+ importInfo.exportStarNames.push(exportedName);
+ } else {
+ importInfo.hasStarExport = true;
+ }
+ }
+
+ getNamedImports(index) {
+ const namedImports = [];
+ while (true) {
+ if (this.tokens.matches1AtIndex(index, tt.braceR)) {
+ index++;
+ break;
+ }
+
+ const specifierInfo = getImportExportSpecifierInfo(this.tokens, index);
+ index = specifierInfo.endIndex;
+ if (!specifierInfo.isType) {
+ namedImports.push({
+ importedName: specifierInfo.leftName,
+ localName: specifierInfo.rightName,
+ });
+ }
+
+ if (this.tokens.matches2AtIndex(index, tt.comma, tt.braceR)) {
+ index += 2;
+ break;
+ } else if (this.tokens.matches1AtIndex(index, tt.braceR)) {
+ index++;
+ break;
+ } else if (this.tokens.matches1AtIndex(index, tt.comma)) {
+ index++;
+ } else {
+ throw new Error(`Unexpected token: ${JSON.stringify(this.tokens.tokens[index])}`);
+ }
+ }
+ return {newIndex: index, namedImports};
+ }
+
+ /**
+ * Get a mutable import info object for this path, creating one if it doesn't
+ * exist yet.
+ */
+ getImportInfo(path) {
+ const existingInfo = this.importInfoByPath.get(path);
+ if (existingInfo) {
+ return existingInfo;
+ }
+ const newInfo = {
+ defaultNames: [],
+ wildcardNames: [],
+ namedImports: [],
+ namedExports: [],
+ hasBareImport: false,
+ exportStarNames: [],
+ hasStarExport: false,
+ };
+ this.importInfoByPath.set(path, newInfo);
+ return newInfo;
+ }
+
+ addExportBinding(localName, exportedName) {
+ if (!this.exportBindingsByLocalName.has(localName)) {
+ this.exportBindingsByLocalName.set(localName, []);
+ }
+ this.exportBindingsByLocalName.get(localName).push(exportedName);
+ }
+
+ /**
+ * Return the code to use for the import for this path, or the empty string if
+ * the code has already been "claimed" by a previous import.
+ */
+ claimImportCode(importPath) {
+ const result = this.importsToReplace.get(importPath);
+ this.importsToReplace.set(importPath, "");
+ return result || "";
+ }
+
+ getIdentifierReplacement(identifierName) {
+ return this.identifierReplacements.get(identifierName) || null;
+ }
+
+ /**
+ * Return a string like `exports.foo = exports.bar`.
+ */
+ resolveExportBinding(assignedName) {
+ const exportedNames = this.exportBindingsByLocalName.get(assignedName);
+ if (!exportedNames || exportedNames.length === 0) {
+ return null;
+ }
+ return exportedNames.map((exportedName) => `exports.${exportedName}`).join(" = ");
+ }
+
+ /**
+ * Return all imported/exported names where we might be interested in whether usages of those
+ * names are shadowed.
+ */
+ getGlobalNames() {
+ return new Set([
+ ...this.identifierReplacements.keys(),
+ ...this.exportBindingsByLocalName.keys(),
+ ]);
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/HelperManager.js b/node_modules/sucrase/dist/esm/HelperManager.js
new file mode 100644
index 0000000..7964db3
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/HelperManager.js
@@ -0,0 +1,176 @@
+
+
+const HELPERS = {
+ require: `
+ import {createRequire as CREATE_REQUIRE_NAME} from "module";
+ const require = CREATE_REQUIRE_NAME(import.meta.url);
+ `,
+ interopRequireWildcard: `
+ function interopRequireWildcard(obj) {
+ if (obj && obj.__esModule) {
+ return obj;
+ } else {
+ var newObj = {};
+ if (obj != null) {
+ for (var key in obj) {
+ if (Object.prototype.hasOwnProperty.call(obj, key)) {
+ newObj[key] = obj[key];
+ }
+ }
+ }
+ newObj.default = obj;
+ return newObj;
+ }
+ }
+ `,
+ interopRequireDefault: `
+ function interopRequireDefault(obj) {
+ return obj && obj.__esModule ? obj : { default: obj };
+ }
+ `,
+ createNamedExportFrom: `
+ function createNamedExportFrom(obj, localName, importedName) {
+ Object.defineProperty(exports, localName, {enumerable: true, configurable: true, get: () => obj[importedName]});
+ }
+ `,
+ // Note that TypeScript and Babel do this differently; TypeScript does a simple existence
+ // check in the exports object and does a plain assignment, whereas Babel uses
+ // defineProperty and builds an object of explicitly-exported names so that star exports can
+ // always take lower precedence. For now, we do the easier TypeScript thing.
+ createStarExport: `
+ function createStarExport(obj) {
+ Object.keys(obj)
+ .filter((key) => key !== "default" && key !== "__esModule")
+ .forEach((key) => {
+ if (exports.hasOwnProperty(key)) {
+ return;
+ }
+ Object.defineProperty(exports, key, {enumerable: true, configurable: true, get: () => obj[key]});
+ });
+ }
+ `,
+ nullishCoalesce: `
+ function nullishCoalesce(lhs, rhsFn) {
+ if (lhs != null) {
+ return lhs;
+ } else {
+ return rhsFn();
+ }
+ }
+ `,
+ asyncNullishCoalesce: `
+ async function asyncNullishCoalesce(lhs, rhsFn) {
+ if (lhs != null) {
+ return lhs;
+ } else {
+ return await rhsFn();
+ }
+ }
+ `,
+ optionalChain: `
+ function optionalChain(ops) {
+ let lastAccessLHS = undefined;
+ let value = ops[0];
+ let i = 1;
+ while (i < ops.length) {
+ const op = ops[i];
+ const fn = ops[i + 1];
+ i += 2;
+ if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) {
+ return undefined;
+ }
+ if (op === 'access' || op === 'optionalAccess') {
+ lastAccessLHS = value;
+ value = fn(value);
+ } else if (op === 'call' || op === 'optionalCall') {
+ value = fn((...args) => value.call(lastAccessLHS, ...args));
+ lastAccessLHS = undefined;
+ }
+ }
+ return value;
+ }
+ `,
+ asyncOptionalChain: `
+ async function asyncOptionalChain(ops) {
+ let lastAccessLHS = undefined;
+ let value = ops[0];
+ let i = 1;
+ while (i < ops.length) {
+ const op = ops[i];
+ const fn = ops[i + 1];
+ i += 2;
+ if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) {
+ return undefined;
+ }
+ if (op === 'access' || op === 'optionalAccess') {
+ lastAccessLHS = value;
+ value = await fn(value);
+ } else if (op === 'call' || op === 'optionalCall') {
+ value = await fn((...args) => value.call(lastAccessLHS, ...args));
+ lastAccessLHS = undefined;
+ }
+ }
+ return value;
+ }
+ `,
+ optionalChainDelete: `
+ function optionalChainDelete(ops) {
+ const result = OPTIONAL_CHAIN_NAME(ops);
+ return result == null ? true : result;
+ }
+ `,
+ asyncOptionalChainDelete: `
+ async function asyncOptionalChainDelete(ops) {
+ const result = await ASYNC_OPTIONAL_CHAIN_NAME(ops);
+ return result == null ? true : result;
+ }
+ `,
+};
+
+export class HelperManager {
+ __init() {this.helperNames = {}}
+ __init2() {this.createRequireName = null}
+ constructor( nameManager) {;this.nameManager = nameManager;HelperManager.prototype.__init.call(this);HelperManager.prototype.__init2.call(this);}
+
+ getHelperName(baseName) {
+ let helperName = this.helperNames[baseName];
+ if (helperName) {
+ return helperName;
+ }
+ helperName = this.nameManager.claimFreeName(`_${baseName}`);
+ this.helperNames[baseName] = helperName;
+ return helperName;
+ }
+
+ emitHelpers() {
+ let resultCode = "";
+ if (this.helperNames.optionalChainDelete) {
+ this.getHelperName("optionalChain");
+ }
+ if (this.helperNames.asyncOptionalChainDelete) {
+ this.getHelperName("asyncOptionalChain");
+ }
+ for (const [baseName, helperCodeTemplate] of Object.entries(HELPERS)) {
+ const helperName = this.helperNames[baseName];
+ let helperCode = helperCodeTemplate;
+ if (baseName === "optionalChainDelete") {
+ helperCode = helperCode.replace("OPTIONAL_CHAIN_NAME", this.helperNames.optionalChain);
+ } else if (baseName === "asyncOptionalChainDelete") {
+ helperCode = helperCode.replace(
+ "ASYNC_OPTIONAL_CHAIN_NAME",
+ this.helperNames.asyncOptionalChain,
+ );
+ } else if (baseName === "require") {
+ if (this.createRequireName === null) {
+ this.createRequireName = this.nameManager.claimFreeName("_createRequire");
+ }
+ helperCode = helperCode.replace(/CREATE_REQUIRE_NAME/g, this.createRequireName);
+ }
+ if (helperName) {
+ resultCode += " ";
+ resultCode += helperCode.replace(baseName, helperName).replace(/\s+/g, " ").trim();
+ }
+ }
+ return resultCode;
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/NameManager.js b/node_modules/sucrase/dist/esm/NameManager.js
new file mode 100644
index 0000000..47d2c9f
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/NameManager.js
@@ -0,0 +1,27 @@
+
+import getIdentifierNames from "./util/getIdentifierNames";
+
+export default class NameManager {
+ __init() {this.usedNames = new Set()}
+
+ constructor(code, tokens) {;NameManager.prototype.__init.call(this);
+ this.usedNames = new Set(getIdentifierNames(code, tokens));
+ }
+
+ claimFreeName(name) {
+ const newName = this.findFreeName(name);
+ this.usedNames.add(newName);
+ return newName;
+ }
+
+ findFreeName(name) {
+ if (!this.usedNames.has(name)) {
+ return name;
+ }
+ let suffixNum = 2;
+ while (this.usedNames.has(name + String(suffixNum))) {
+ suffixNum++;
+ }
+ return name + String(suffixNum);
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/Options-gen-types.js b/node_modules/sucrase/dist/esm/Options-gen-types.js
new file mode 100644
index 0000000..5c39c2c
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/Options-gen-types.js
@@ -0,0 +1,42 @@
+/**
+ * This module was automatically generated by `ts-interface-builder`
+ */
+import * as t from "ts-interface-checker";
+// tslint:disable:object-literal-key-quotes
+
+export const Transform = t.union(
+ t.lit("jsx"),
+ t.lit("typescript"),
+ t.lit("flow"),
+ t.lit("imports"),
+ t.lit("react-hot-loader"),
+ t.lit("jest"),
+);
+
+export const SourceMapOptions = t.iface([], {
+ compiledFilename: "string",
+});
+
+export const Options = t.iface([], {
+ transforms: t.array("Transform"),
+ disableESTransforms: t.opt("boolean"),
+ jsxRuntime: t.opt(t.union(t.lit("classic"), t.lit("automatic"), t.lit("preserve"))),
+ production: t.opt("boolean"),
+ jsxImportSource: t.opt("string"),
+ jsxPragma: t.opt("string"),
+ jsxFragmentPragma: t.opt("string"),
+ keepUnusedImports: t.opt("boolean"),
+ preserveDynamicImport: t.opt("boolean"),
+ injectCreateRequireForImportRequire: t.opt("boolean"),
+ enableLegacyTypeScriptModuleInterop: t.opt("boolean"),
+ enableLegacyBabel5ModuleInterop: t.opt("boolean"),
+ sourceMapOptions: t.opt("SourceMapOptions"),
+ filePath: t.opt("string"),
+});
+
+const exportedTypeSuite = {
+ Transform,
+ SourceMapOptions,
+ Options,
+};
+export default exportedTypeSuite;
diff --git a/node_modules/sucrase/dist/esm/Options.js b/node_modules/sucrase/dist/esm/Options.js
new file mode 100644
index 0000000..83ee83d
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/Options.js
@@ -0,0 +1,101 @@
+import {createCheckers} from "ts-interface-checker";
+
+import OptionsGenTypes from "./Options-gen-types";
+
+const {Options: OptionsChecker} = createCheckers(OptionsGenTypes);
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+export function validateOptions(options) {
+ OptionsChecker.strictCheck(options);
+}
diff --git a/node_modules/sucrase/dist/esm/TokenProcessor.js b/node_modules/sucrase/dist/esm/TokenProcessor.js
new file mode 100644
index 0000000..5335f23
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/TokenProcessor.js
@@ -0,0 +1,357 @@
+
+
+
+import { TokenType as tt} from "./parser/tokenizer/types";
+import isAsyncOperation from "./util/isAsyncOperation";
+
+
+
+
+
+
+
+
+
+
+
+export default class TokenProcessor {
+ __init() {this.resultCode = ""}
+ // Array mapping input token index to optional string index position in the
+ // output code.
+ __init2() {this.resultMappings = new Array(this.tokens.length)}
+ __init3() {this.tokenIndex = 0}
+
+ constructor(
+ code,
+ tokens,
+ isFlowEnabled,
+ disableESTransforms,
+ helperManager,
+ ) {;this.code = code;this.tokens = tokens;this.isFlowEnabled = isFlowEnabled;this.disableESTransforms = disableESTransforms;this.helperManager = helperManager;TokenProcessor.prototype.__init.call(this);TokenProcessor.prototype.__init2.call(this);TokenProcessor.prototype.__init3.call(this);}
+
+ /**
+ * Snapshot the token state in a way that can be restored later, useful for
+ * things like lookahead.
+ *
+ * resultMappings do not need to be copied since in all use cases, they will
+ * be overwritten anyway after restore.
+ */
+ snapshot() {
+ return {
+ resultCode: this.resultCode,
+ tokenIndex: this.tokenIndex,
+ };
+ }
+
+ restoreToSnapshot(snapshot) {
+ this.resultCode = snapshot.resultCode;
+ this.tokenIndex = snapshot.tokenIndex;
+ }
+
+ /**
+ * Remove and return the code generated since the snapshot, leaving the
+ * current token position in-place. Unlike most TokenProcessor operations,
+ * this operation can result in input/output line number mismatches because
+ * the removed code may contain newlines, so this operation should be used
+ * sparingly.
+ */
+ dangerouslyGetAndRemoveCodeSinceSnapshot(snapshot) {
+ const result = this.resultCode.slice(snapshot.resultCode.length);
+ this.resultCode = snapshot.resultCode;
+ return result;
+ }
+
+ reset() {
+ this.resultCode = "";
+ this.resultMappings = new Array(this.tokens.length);
+ this.tokenIndex = 0;
+ }
+
+ matchesContextualAtIndex(index, contextualKeyword) {
+ return (
+ this.matches1AtIndex(index, tt.name) &&
+ this.tokens[index].contextualKeyword === contextualKeyword
+ );
+ }
+
+ identifierNameAtIndex(index) {
+ // TODO: We need to process escapes since technically you can have unicode escapes in variable
+ // names.
+ return this.identifierNameForToken(this.tokens[index]);
+ }
+
+ identifierNameAtRelativeIndex(relativeIndex) {
+ return this.identifierNameForToken(this.tokenAtRelativeIndex(relativeIndex));
+ }
+
+ identifierName() {
+ return this.identifierNameForToken(this.currentToken());
+ }
+
+ identifierNameForToken(token) {
+ return this.code.slice(token.start, token.end);
+ }
+
+ rawCodeForToken(token) {
+ return this.code.slice(token.start, token.end);
+ }
+
+ stringValueAtIndex(index) {
+ return this.stringValueForToken(this.tokens[index]);
+ }
+
+ stringValue() {
+ return this.stringValueForToken(this.currentToken());
+ }
+
+ stringValueForToken(token) {
+ // This is used to identify when two imports are the same and to resolve TypeScript enum keys.
+ // Ideally we'd process escapes within the strings, but for now we pretty much take the raw
+ // code.
+ return this.code.slice(token.start + 1, token.end - 1);
+ }
+
+ matches1AtIndex(index, t1) {
+ return this.tokens[index].type === t1;
+ }
+
+ matches2AtIndex(index, t1, t2) {
+ return this.tokens[index].type === t1 && this.tokens[index + 1].type === t2;
+ }
+
+ matches3AtIndex(index, t1, t2, t3) {
+ return (
+ this.tokens[index].type === t1 &&
+ this.tokens[index + 1].type === t2 &&
+ this.tokens[index + 2].type === t3
+ );
+ }
+
+ matches1(t1) {
+ return this.tokens[this.tokenIndex].type === t1;
+ }
+
+ matches2(t1, t2) {
+ return this.tokens[this.tokenIndex].type === t1 && this.tokens[this.tokenIndex + 1].type === t2;
+ }
+
+ matches3(t1, t2, t3) {
+ return (
+ this.tokens[this.tokenIndex].type === t1 &&
+ this.tokens[this.tokenIndex + 1].type === t2 &&
+ this.tokens[this.tokenIndex + 2].type === t3
+ );
+ }
+
+ matches4(t1, t2, t3, t4) {
+ return (
+ this.tokens[this.tokenIndex].type === t1 &&
+ this.tokens[this.tokenIndex + 1].type === t2 &&
+ this.tokens[this.tokenIndex + 2].type === t3 &&
+ this.tokens[this.tokenIndex + 3].type === t4
+ );
+ }
+
+ matches5(t1, t2, t3, t4, t5) {
+ return (
+ this.tokens[this.tokenIndex].type === t1 &&
+ this.tokens[this.tokenIndex + 1].type === t2 &&
+ this.tokens[this.tokenIndex + 2].type === t3 &&
+ this.tokens[this.tokenIndex + 3].type === t4 &&
+ this.tokens[this.tokenIndex + 4].type === t5
+ );
+ }
+
+ matchesContextual(contextualKeyword) {
+ return this.matchesContextualAtIndex(this.tokenIndex, contextualKeyword);
+ }
+
+ matchesContextIdAndLabel(type, contextId) {
+ return this.matches1(type) && this.currentToken().contextId === contextId;
+ }
+
+ previousWhitespaceAndComments() {
+ let whitespaceAndComments = this.code.slice(
+ this.tokenIndex > 0 ? this.tokens[this.tokenIndex - 1].end : 0,
+ this.tokenIndex < this.tokens.length ? this.tokens[this.tokenIndex].start : this.code.length,
+ );
+ if (this.isFlowEnabled) {
+ whitespaceAndComments = whitespaceAndComments.replace(/@flow/g, "");
+ }
+ return whitespaceAndComments;
+ }
+
+ replaceToken(newCode) {
+ this.resultCode += this.previousWhitespaceAndComments();
+ this.appendTokenPrefix();
+ this.resultMappings[this.tokenIndex] = this.resultCode.length;
+ this.resultCode += newCode;
+ this.appendTokenSuffix();
+ this.tokenIndex++;
+ }
+
+ replaceTokenTrimmingLeftWhitespace(newCode) {
+ this.resultCode += this.previousWhitespaceAndComments().replace(/[^\r\n]/g, "");
+ this.appendTokenPrefix();
+ this.resultMappings[this.tokenIndex] = this.resultCode.length;
+ this.resultCode += newCode;
+ this.appendTokenSuffix();
+ this.tokenIndex++;
+ }
+
+ removeInitialToken() {
+ this.replaceToken("");
+ }
+
+ removeToken() {
+ this.replaceTokenTrimmingLeftWhitespace("");
+ }
+
+ /**
+ * Remove all code until the next }, accounting for balanced braces.
+ */
+ removeBalancedCode() {
+ let braceDepth = 0;
+ while (!this.isAtEnd()) {
+ if (this.matches1(tt.braceL)) {
+ braceDepth++;
+ } else if (this.matches1(tt.braceR)) {
+ if (braceDepth === 0) {
+ return;
+ }
+ braceDepth--;
+ }
+ this.removeToken();
+ }
+ }
+
+ copyExpectedToken(tokenType) {
+ if (this.tokens[this.tokenIndex].type !== tokenType) {
+ throw new Error(`Expected token ${tokenType}`);
+ }
+ this.copyToken();
+ }
+
+ copyToken() {
+ this.resultCode += this.previousWhitespaceAndComments();
+ this.appendTokenPrefix();
+ this.resultMappings[this.tokenIndex] = this.resultCode.length;
+ this.resultCode += this.code.slice(
+ this.tokens[this.tokenIndex].start,
+ this.tokens[this.tokenIndex].end,
+ );
+ this.appendTokenSuffix();
+ this.tokenIndex++;
+ }
+
+ copyTokenWithPrefix(prefix) {
+ this.resultCode += this.previousWhitespaceAndComments();
+ this.appendTokenPrefix();
+ this.resultCode += prefix;
+ this.resultMappings[this.tokenIndex] = this.resultCode.length;
+ this.resultCode += this.code.slice(
+ this.tokens[this.tokenIndex].start,
+ this.tokens[this.tokenIndex].end,
+ );
+ this.appendTokenSuffix();
+ this.tokenIndex++;
+ }
+
+ appendTokenPrefix() {
+ const token = this.currentToken();
+ if (token.numNullishCoalesceStarts || token.isOptionalChainStart) {
+ token.isAsyncOperation = isAsyncOperation(this);
+ }
+ if (this.disableESTransforms) {
+ return;
+ }
+ if (token.numNullishCoalesceStarts) {
+ for (let i = 0; i < token.numNullishCoalesceStarts; i++) {
+ if (token.isAsyncOperation) {
+ this.resultCode += "await ";
+ this.resultCode += this.helperManager.getHelperName("asyncNullishCoalesce");
+ } else {
+ this.resultCode += this.helperManager.getHelperName("nullishCoalesce");
+ }
+ this.resultCode += "(";
+ }
+ }
+ if (token.isOptionalChainStart) {
+ if (token.isAsyncOperation) {
+ this.resultCode += "await ";
+ }
+ if (this.tokenIndex > 0 && this.tokenAtRelativeIndex(-1).type === tt._delete) {
+ if (token.isAsyncOperation) {
+ this.resultCode += this.helperManager.getHelperName("asyncOptionalChainDelete");
+ } else {
+ this.resultCode += this.helperManager.getHelperName("optionalChainDelete");
+ }
+ } else if (token.isAsyncOperation) {
+ this.resultCode += this.helperManager.getHelperName("asyncOptionalChain");
+ } else {
+ this.resultCode += this.helperManager.getHelperName("optionalChain");
+ }
+ this.resultCode += "([";
+ }
+ }
+
+ appendTokenSuffix() {
+ const token = this.currentToken();
+ if (token.isOptionalChainEnd && !this.disableESTransforms) {
+ this.resultCode += "])";
+ }
+ if (token.numNullishCoalesceEnds && !this.disableESTransforms) {
+ for (let i = 0; i < token.numNullishCoalesceEnds; i++) {
+ this.resultCode += "))";
+ }
+ }
+ }
+
+ appendCode(code) {
+ this.resultCode += code;
+ }
+
+ currentToken() {
+ return this.tokens[this.tokenIndex];
+ }
+
+ currentTokenCode() {
+ const token = this.currentToken();
+ return this.code.slice(token.start, token.end);
+ }
+
+ tokenAtRelativeIndex(relativeIndex) {
+ return this.tokens[this.tokenIndex + relativeIndex];
+ }
+
+ currentIndex() {
+ return this.tokenIndex;
+ }
+
+ /**
+ * Move to the next token. Only suitable in preprocessing steps. When
+ * generating new code, you should use copyToken or removeToken.
+ */
+ nextToken() {
+ if (this.tokenIndex === this.tokens.length) {
+ throw new Error("Unexpectedly reached end of input.");
+ }
+ this.tokenIndex++;
+ }
+
+ previousToken() {
+ this.tokenIndex--;
+ }
+
+ finish() {
+ if (this.tokenIndex !== this.tokens.length) {
+ throw new Error("Tried to finish processing tokens before reaching the end.");
+ }
+ this.resultCode += this.previousWhitespaceAndComments();
+ return {code: this.resultCode, mappings: this.resultMappings};
+ }
+
+ isAtEnd() {
+ return this.tokenIndex === this.tokens.length;
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/cli.js b/node_modules/sucrase/dist/esm/cli.js
new file mode 100644
index 0000000..93fbb6e
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/cli.js
@@ -0,0 +1,320 @@
+/* eslint-disable no-console */
+import commander from "commander";
+import globCb from "glob";
+import {exists, mkdir, readdir, readFile, stat, writeFile} from "mz/fs";
+import {dirname, join, relative} from "path";
+import {promisify} from "util";
+
+import { transform} from "./index";
+
+
+
+
+
+
+
+
+
+
+
+const glob = promisify(globCb);
+
+export default function run() {
+ commander
+ .description(`Sucrase: super-fast Babel alternative.`)
+ .usage("[options] <srcDir>")
+ .option(
+ "-d, --out-dir <out>",
+ "Compile an input directory of modules into an output directory.",
+ )
+ .option(
+ "-p, --project <dir>",
+ "Compile a TypeScript project, will read from tsconfig.json in <dir>",
+ )
+ .option("--out-extension <extension>", "File extension to use for all output files.", "js")
+ .option("--exclude-dirs <paths>", "Names of directories that should not be traversed.")
+ .option("-q, --quiet", "Don't print the names of converted files.")
+ .option("-t, --transforms <transforms>", "Comma-separated list of transforms to run.")
+ .option("--disable-es-transforms", "Opt out of all ES syntax transforms.")
+ .option("--jsx-runtime <string>", "Transformation mode for the JSX transform.")
+ .option("--production", "Disable debugging information from JSX in output.")
+ .option(
+ "--jsx-import-source <string>",
+ "Automatic JSX transform import path prefix, defaults to `React.Fragment`.",
+ )
+ .option(
+ "--jsx-pragma <string>",
+ "Classic JSX transform element creation function, defaults to `React.createElement`.",
+ )
+ .option(
+ "--jsx-fragment-pragma <string>",
+ "Classic JSX transform fragment component, defaults to `React.Fragment`.",
+ )
+ .option("--keep-unused-imports", "Disable automatic removal of type-only imports/exports.")
+ .option("--preserve-dynamic-import", "Don't transpile dynamic import() to require.")
+ .option(
+ "--inject-create-require-for-import-require",
+ "Use `createRequire` when transpiling TS `import = require` to ESM.",
+ )
+ .option(
+ "--enable-legacy-typescript-module-interop",
+ "Use default TypeScript ESM/CJS interop strategy.",
+ )
+ .option("--enable-legacy-babel5-module-interop", "Use Babel 5 ESM/CJS interop strategy.")
+ .parse(process.argv);
+
+ if (commander.project) {
+ if (
+ commander.outDir ||
+ commander.transforms ||
+ commander.args[0] ||
+ commander.enableLegacyTypescriptModuleInterop
+ ) {
+ console.error(
+ "If TypeScript project is specified, out directory, transforms, source " +
+ "directory, and --enable-legacy-typescript-module-interop may not be specified.",
+ );
+ process.exit(1);
+ }
+ } else {
+ if (!commander.outDir) {
+ console.error("Out directory is required");
+ process.exit(1);
+ }
+
+ if (!commander.transforms) {
+ console.error("Transforms option is required.");
+ process.exit(1);
+ }
+
+ if (!commander.args[0]) {
+ console.error("Source directory is required.");
+ process.exit(1);
+ }
+ }
+
+ const options = {
+ outDirPath: commander.outDir,
+ srcDirPath: commander.args[0],
+ project: commander.project,
+ outExtension: commander.outExtension,
+ excludeDirs: commander.excludeDirs ? commander.excludeDirs.split(",") : [],
+ quiet: commander.quiet,
+ sucraseOptions: {
+ transforms: commander.transforms ? commander.transforms.split(",") : [],
+ disableESTransforms: commander.disableEsTransforms,
+ jsxRuntime: commander.jsxRuntime,
+ production: commander.production,
+ jsxImportSource: commander.jsxImportSource,
+ jsxPragma: commander.jsxPragma || "React.createElement",
+ jsxFragmentPragma: commander.jsxFragmentPragma || "React.Fragment",
+ keepUnusedImports: commander.keepUnusedImports,
+ preserveDynamicImport: commander.preserveDynamicImport,
+ injectCreateRequireForImportRequire: commander.injectCreateRequireForImportRequire,
+ enableLegacyTypeScriptModuleInterop: commander.enableLegacyTypescriptModuleInterop,
+ enableLegacyBabel5ModuleInterop: commander.enableLegacyBabel5ModuleInterop,
+ },
+ };
+
+ buildDirectory(options).catch((e) => {
+ process.exitCode = 1;
+ console.error(e);
+ });
+}
+
+
+
+
+
+
+async function findFiles(options) {
+ const outDirPath = options.outDirPath;
+ const srcDirPath = options.srcDirPath;
+
+ const extensions = options.sucraseOptions.transforms.includes("typescript")
+ ? [".ts", ".tsx"]
+ : [".js", ".jsx"];
+
+ if (!(await exists(outDirPath))) {
+ await mkdir(outDirPath);
+ }
+
+ const outArr = [];
+ for (const child of await readdir(srcDirPath)) {
+ if (["node_modules", ".git"].includes(child) || options.excludeDirs.includes(child)) {
+ continue;
+ }
+ const srcChildPath = join(srcDirPath, child);
+ const outChildPath = join(outDirPath, child);
+ if ((await stat(srcChildPath)).isDirectory()) {
+ const innerOptions = {...options};
+ innerOptions.srcDirPath = srcChildPath;
+ innerOptions.outDirPath = outChildPath;
+ const innerFiles = await findFiles(innerOptions);
+ outArr.push(...innerFiles);
+ } else if (extensions.some((ext) => srcChildPath.endsWith(ext))) {
+ const outPath = outChildPath.replace(/\.\w+$/, `.${options.outExtension}`);
+ outArr.push({
+ srcPath: srcChildPath,
+ outPath,
+ });
+ }
+ }
+
+ return outArr;
+}
+
+async function runGlob(options) {
+ const tsConfigPath = join(options.project, "tsconfig.json");
+
+ let str;
+ try {
+ str = await readFile(tsConfigPath, "utf8");
+ } catch (err) {
+ console.error("Could not find project tsconfig.json");
+ console.error(` --project=${options.project}`);
+ console.error(err);
+ process.exit(1);
+ }
+ const json = JSON.parse(str);
+
+ const foundFiles = [];
+
+ const files = json.files;
+ const include = json.include;
+
+ const absProject = join(process.cwd(), options.project);
+ const outDirs = [];
+
+ if (!(await exists(options.outDirPath))) {
+ await mkdir(options.outDirPath);
+ }
+
+ if (files) {
+ for (const file of files) {
+ if (file.endsWith(".d.ts")) {
+ continue;
+ }
+ if (!file.endsWith(".ts") && !file.endsWith(".js")) {
+ continue;
+ }
+
+ const srcFile = join(absProject, file);
+ const outFile = join(options.outDirPath, file);
+ const outPath = outFile.replace(/\.\w+$/, `.${options.outExtension}`);
+
+ const outDir = dirname(outPath);
+ if (!outDirs.includes(outDir)) {
+ outDirs.push(outDir);
+ }
+
+ foundFiles.push({
+ srcPath: srcFile,
+ outPath,
+ });
+ }
+ }
+ if (include) {
+ for (const pattern of include) {
+ const globFiles = await glob(join(absProject, pattern));
+ for (const file of globFiles) {
+ if (!file.endsWith(".ts") && !file.endsWith(".js")) {
+ continue;
+ }
+ if (file.endsWith(".d.ts")) {
+ continue;
+ }
+
+ const relativeFile = relative(absProject, file);
+ const outFile = join(options.outDirPath, relativeFile);
+ const outPath = outFile.replace(/\.\w+$/, `.${options.outExtension}`);
+
+ const outDir = dirname(outPath);
+ if (!outDirs.includes(outDir)) {
+ outDirs.push(outDir);
+ }
+
+ foundFiles.push({
+ srcPath: file,
+ outPath,
+ });
+ }
+ }
+ }
+
+ for (const outDirPath of outDirs) {
+ if (!(await exists(outDirPath))) {
+ await mkdir(outDirPath);
+ }
+ }
+
+ // TODO: read exclude
+
+ return foundFiles;
+}
+
+async function updateOptionsFromProject(options) {
+ /**
+ * Read the project information and assign the following.
+ * - outDirPath
+ * - transform: imports
+ * - transform: typescript
+ * - enableLegacyTypescriptModuleInterop: true/false.
+ */
+
+ const tsConfigPath = join(options.project, "tsconfig.json");
+
+ let str;
+ try {
+ str = await readFile(tsConfigPath, "utf8");
+ } catch (err) {
+ console.error("Could not find project tsconfig.json");
+ console.error(` --project=${options.project}`);
+ console.error(err);
+ process.exit(1);
+ }
+ const json = JSON.parse(str);
+ const sucraseOpts = options.sucraseOptions;
+ if (!sucraseOpts.transforms.includes("typescript")) {
+ sucraseOpts.transforms.push("typescript");
+ }
+
+ const compilerOpts = json.compilerOptions;
+ if (compilerOpts.outDir) {
+ options.outDirPath = join(process.cwd(), options.project, compilerOpts.outDir);
+ }
+ if (compilerOpts.esModuleInterop !== true) {
+ sucraseOpts.enableLegacyTypeScriptModuleInterop = true;
+ }
+ if (compilerOpts.module === "commonjs") {
+ if (!sucraseOpts.transforms.includes("imports")) {
+ sucraseOpts.transforms.push("imports");
+ }
+ }
+}
+
+async function buildDirectory(options) {
+ let files;
+ if (options.outDirPath && options.srcDirPath) {
+ files = await findFiles(options);
+ } else if (options.project) {
+ await updateOptionsFromProject(options);
+ files = await runGlob(options);
+ } else {
+ console.error("Project or Source directory required.");
+ process.exit(1);
+ }
+
+ for (const file of files) {
+ await buildFile(file.srcPath, file.outPath, options);
+ }
+}
+
+async function buildFile(srcPath, outPath, options) {
+ if (!options.quiet) {
+ console.log(`${srcPath} -> ${outPath}`);
+ }
+ const code = (await readFile(srcPath)).toString();
+ const transformedCode = transform(code, {...options.sucraseOptions, filePath: srcPath}).code;
+ await writeFile(outPath, transformedCode);
+}
diff --git a/node_modules/sucrase/dist/esm/computeSourceMap.js b/node_modules/sucrase/dist/esm/computeSourceMap.js
new file mode 100644
index 0000000..699ea78
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/computeSourceMap.js
@@ -0,0 +1,89 @@
+import {GenMapping, maybeAddSegment, toEncodedMap} from "@jridgewell/gen-mapping";
+
+
+
+import {charCodes} from "./parser/util/charcodes";
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * Generate a source map indicating that each line maps directly to the original line,
+ * with the tokens in their new positions.
+ */
+export default function computeSourceMap(
+ {code: generatedCode, mappings: rawMappings},
+ filePath,
+ options,
+ source,
+ tokens,
+) {
+ const sourceColumns = computeSourceColumns(source, tokens);
+ const map = new GenMapping({file: options.compiledFilename});
+ let tokenIndex = 0;
+ // currentMapping is the output source index for the current input token being
+ // considered.
+ let currentMapping = rawMappings[0];
+ while (currentMapping === undefined && tokenIndex < rawMappings.length - 1) {
+ tokenIndex++;
+ currentMapping = rawMappings[tokenIndex];
+ }
+ let line = 0;
+ let lineStart = 0;
+ if (currentMapping !== lineStart) {
+ maybeAddSegment(map, line, 0, filePath, line, 0);
+ }
+ for (let i = 0; i < generatedCode.length; i++) {
+ if (i === currentMapping) {
+ const genColumn = currentMapping - lineStart;
+ const sourceColumn = sourceColumns[tokenIndex];
+ maybeAddSegment(map, line, genColumn, filePath, line, sourceColumn);
+ while (
+ (currentMapping === i || currentMapping === undefined) &&
+ tokenIndex < rawMappings.length - 1
+ ) {
+ tokenIndex++;
+ currentMapping = rawMappings[tokenIndex];
+ }
+ }
+ if (generatedCode.charCodeAt(i) === charCodes.lineFeed) {
+ line++;
+ lineStart = i + 1;
+ if (currentMapping !== lineStart) {
+ maybeAddSegment(map, line, 0, filePath, line, 0);
+ }
+ }
+ }
+ const {sourceRoot, sourcesContent, ...sourceMap} = toEncodedMap(map);
+ return sourceMap ;
+}
+
+/**
+ * Create an array mapping each token index to the 0-based column of the start
+ * position of the token.
+ */
+function computeSourceColumns(code, tokens) {
+ const sourceColumns = new Array(tokens.length);
+ let tokenIndex = 0;
+ let currentMapping = tokens[tokenIndex].start;
+ let lineStart = 0;
+ for (let i = 0; i < code.length; i++) {
+ if (i === currentMapping) {
+ sourceColumns[tokenIndex] = currentMapping - lineStart;
+ tokenIndex++;
+ currentMapping = tokens[tokenIndex].start;
+ }
+ if (code.charCodeAt(i) === charCodes.lineFeed) {
+ lineStart = i + 1;
+ }
+ }
+ return sourceColumns;
+}
diff --git a/node_modules/sucrase/dist/esm/identifyShadowedGlobals.js b/node_modules/sucrase/dist/esm/identifyShadowedGlobals.js
new file mode 100644
index 0000000..f953633
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/identifyShadowedGlobals.js
@@ -0,0 +1,98 @@
+import {
+ isBlockScopedDeclaration,
+ isFunctionScopedDeclaration,
+ isNonTopLevelDeclaration,
+} from "./parser/tokenizer";
+
+import {TokenType as tt} from "./parser/tokenizer/types";
+
+
+/**
+ * Traverse the given tokens and modify them if necessary to indicate that some names shadow global
+ * variables.
+ */
+export default function identifyShadowedGlobals(
+ tokens,
+ scopes,
+ globalNames,
+) {
+ if (!hasShadowedGlobals(tokens, globalNames)) {
+ return;
+ }
+ markShadowedGlobals(tokens, scopes, globalNames);
+}
+
+/**
+ * We can do a fast up-front check to see if there are any declarations to global names. If not,
+ * then there's no point in computing scope assignments.
+ */
+// Exported for testing.
+export function hasShadowedGlobals(tokens, globalNames) {
+ for (const token of tokens.tokens) {
+ if (
+ token.type === tt.name &&
+ !token.isType &&
+ isNonTopLevelDeclaration(token) &&
+ globalNames.has(tokens.identifierNameForToken(token))
+ ) {
+ return true;
+ }
+ }
+ return false;
+}
+
+function markShadowedGlobals(
+ tokens,
+ scopes,
+ globalNames,
+) {
+ const scopeStack = [];
+ let scopeIndex = scopes.length - 1;
+ // Scopes were generated at completion time, so they're sorted by end index, so we can maintain a
+ // good stack by going backwards through them.
+ for (let i = tokens.tokens.length - 1; ; i--) {
+ while (scopeStack.length > 0 && scopeStack[scopeStack.length - 1].startTokenIndex === i + 1) {
+ scopeStack.pop();
+ }
+ while (scopeIndex >= 0 && scopes[scopeIndex].endTokenIndex === i + 1) {
+ scopeStack.push(scopes[scopeIndex]);
+ scopeIndex--;
+ }
+ // Process scopes after the last iteration so we can make sure we pop all of them.
+ if (i < 0) {
+ break;
+ }
+
+ const token = tokens.tokens[i];
+ const name = tokens.identifierNameForToken(token);
+ if (scopeStack.length > 1 && !token.isType && token.type === tt.name && globalNames.has(name)) {
+ if (isBlockScopedDeclaration(token)) {
+ markShadowedForScope(scopeStack[scopeStack.length - 1], tokens, name);
+ } else if (isFunctionScopedDeclaration(token)) {
+ let stackIndex = scopeStack.length - 1;
+ while (stackIndex > 0 && !scopeStack[stackIndex].isFunctionScope) {
+ stackIndex--;
+ }
+ if (stackIndex < 0) {
+ throw new Error("Did not find parent function scope.");
+ }
+ markShadowedForScope(scopeStack[stackIndex], tokens, name);
+ }
+ }
+ }
+ if (scopeStack.length > 0) {
+ throw new Error("Expected empty scope stack after processing file.");
+ }
+}
+
+function markShadowedForScope(scope, tokens, name) {
+ for (let i = scope.startTokenIndex; i < scope.endTokenIndex; i++) {
+ const token = tokens.tokens[i];
+ if (
+ (token.type === tt.name || token.type === tt.jsxName) &&
+ tokens.identifierNameForToken(token) === name
+ ) {
+ token.shadowsGlobal = true;
+ }
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/index.js b/node_modules/sucrase/dist/esm/index.js
new file mode 100644
index 0000000..e902716
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/index.js
@@ -0,0 +1,133 @@
+import CJSImportProcessor from "./CJSImportProcessor";
+import computeSourceMap, {} from "./computeSourceMap";
+import {HelperManager} from "./HelperManager";
+import identifyShadowedGlobals from "./identifyShadowedGlobals";
+import NameManager from "./NameManager";
+import {validateOptions} from "./Options";
+
+import {parse} from "./parser";
+
+import TokenProcessor from "./TokenProcessor";
+import RootTransformer from "./transformers/RootTransformer";
+import formatTokens from "./util/formatTokens";
+import getTSImportedNames from "./util/getTSImportedNames";
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+;
+
+export function getVersion() {
+ /* istanbul ignore next */
+ return "3.34.0";
+}
+
+export function transform(code, options) {
+ validateOptions(options);
+ try {
+ const sucraseContext = getSucraseContext(code, options);
+ const transformer = new RootTransformer(
+ sucraseContext,
+ options.transforms,
+ Boolean(options.enableLegacyBabel5ModuleInterop),
+ options,
+ );
+ const transformerResult = transformer.transform();
+ let result = {code: transformerResult.code};
+ if (options.sourceMapOptions) {
+ if (!options.filePath) {
+ throw new Error("filePath must be specified when generating a source map.");
+ }
+ result = {
+ ...result,
+ sourceMap: computeSourceMap(
+ transformerResult,
+ options.filePath,
+ options.sourceMapOptions,
+ code,
+ sucraseContext.tokenProcessor.tokens,
+ ),
+ };
+ }
+ return result;
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ } catch (e) {
+ if (options.filePath) {
+ e.message = `Error transforming ${options.filePath}: ${e.message}`;
+ }
+ throw e;
+ }
+}
+
+/**
+ * Return a string representation of the sucrase tokens, mostly useful for
+ * diagnostic purposes.
+ */
+export function getFormattedTokens(code, options) {
+ const tokens = getSucraseContext(code, options).tokenProcessor.tokens;
+ return formatTokens(code, tokens);
+}
+
+/**
+ * Call into the parser/tokenizer and do some further preprocessing:
+ * - Come up with a set of used names so that we can assign new names.
+ * - Preprocess all import/export statements so we know which globals we are interested in.
+ * - Compute situations where any of those globals are shadowed.
+ *
+ * In the future, some of these preprocessing steps can be skipped based on what actual work is
+ * being done.
+ */
+function getSucraseContext(code, options) {
+ const isJSXEnabled = options.transforms.includes("jsx");
+ const isTypeScriptEnabled = options.transforms.includes("typescript");
+ const isFlowEnabled = options.transforms.includes("flow");
+ const disableESTransforms = options.disableESTransforms === true;
+ const file = parse(code, isJSXEnabled, isTypeScriptEnabled, isFlowEnabled);
+ const tokens = file.tokens;
+ const scopes = file.scopes;
+
+ const nameManager = new NameManager(code, tokens);
+ const helperManager = new HelperManager(nameManager);
+ const tokenProcessor = new TokenProcessor(
+ code,
+ tokens,
+ isFlowEnabled,
+ disableESTransforms,
+ helperManager,
+ );
+ const enableLegacyTypeScriptModuleInterop = Boolean(options.enableLegacyTypeScriptModuleInterop);
+
+ let importProcessor = null;
+ if (options.transforms.includes("imports")) {
+ importProcessor = new CJSImportProcessor(
+ nameManager,
+ tokenProcessor,
+ enableLegacyTypeScriptModuleInterop,
+ options,
+ options.transforms.includes("typescript"),
+ Boolean(options.keepUnusedImports),
+ helperManager,
+ );
+ importProcessor.preprocessTokens();
+ // We need to mark shadowed globals after processing imports so we know that the globals are,
+ // but before type-only import pruning, since that relies on shadowing information.
+ identifyShadowedGlobals(tokenProcessor, scopes, importProcessor.getGlobalNames());
+ if (options.transforms.includes("typescript") && !options.keepUnusedImports) {
+ importProcessor.pruneTypeOnlyImports();
+ }
+ } else if (options.transforms.includes("typescript") && !options.keepUnusedImports) {
+ // Shadowed global detection is needed for TS implicit elision of imported names.
+ identifyShadowedGlobals(tokenProcessor, scopes, getTSImportedNames(tokenProcessor));
+ }
+ return {tokenProcessor, scopes, nameManager, importProcessor, helperManager};
+}
diff --git a/node_modules/sucrase/dist/esm/parser/index.js b/node_modules/sucrase/dist/esm/parser/index.js
new file mode 100644
index 0000000..5074ae4
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/index.js
@@ -0,0 +1,31 @@
+
+
+import {augmentError, initParser, state} from "./traverser/base";
+import {parseFile} from "./traverser/index";
+
+export class File {
+
+
+
+ constructor(tokens, scopes) {
+ this.tokens = tokens;
+ this.scopes = scopes;
+ }
+}
+
+export function parse(
+ input,
+ isJSXEnabled,
+ isTypeScriptEnabled,
+ isFlowEnabled,
+) {
+ if (isFlowEnabled && isTypeScriptEnabled) {
+ throw new Error("Cannot combine flow and typescript plugins.");
+ }
+ initParser(input, isJSXEnabled, isTypeScriptEnabled, isFlowEnabled);
+ const result = parseFile();
+ if (state.error) {
+ throw augmentError(state.error);
+ }
+ return result;
+}
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/flow.js b/node_modules/sucrase/dist/esm/parser/plugins/flow.js
new file mode 100644
index 0000000..66295d1
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/flow.js
@@ -0,0 +1,1105 @@
+/* eslint max-len: 0 */
+
+import {
+ eat,
+ lookaheadType,
+ lookaheadTypeAndKeyword,
+ match,
+ next,
+ popTypeContext,
+ pushTypeContext,
+
+} from "../tokenizer/index";
+import {ContextualKeyword} from "../tokenizer/keywords";
+import {TokenType, TokenType as tt} from "../tokenizer/types";
+import {input, state} from "../traverser/base";
+import {
+ baseParseMaybeAssign,
+ baseParseSubscript,
+ baseParseSubscripts,
+ parseArrow,
+ parseArrowExpression,
+ parseCallExpressionArguments,
+ parseExprAtom,
+ parseExpression,
+ parseFunctionBody,
+ parseIdentifier,
+ parseLiteral,
+
+} from "../traverser/expression";
+import {
+ baseParseExportStar,
+ parseExport,
+ parseExportFrom,
+ parseExportSpecifiers,
+ parseFunctionParams,
+ parseImport,
+ parseStatement,
+} from "../traverser/statement";
+import {
+ canInsertSemicolon,
+ eatContextual,
+ expect,
+ expectContextual,
+ isContextual,
+ isLookaheadContextual,
+ semicolon,
+ unexpected,
+} from "../traverser/util";
+
+function isMaybeDefaultImport(lookahead) {
+ return (
+ (lookahead.type === tt.name || !!(lookahead.type & TokenType.IS_KEYWORD)) &&
+ lookahead.contextualKeyword !== ContextualKeyword._from
+ );
+}
+
+function flowParseTypeInitialiser(tok) {
+ const oldIsType = pushTypeContext(0);
+ expect(tok || tt.colon);
+ flowParseType();
+ popTypeContext(oldIsType);
+}
+
+function flowParsePredicate() {
+ expect(tt.modulo);
+ expectContextual(ContextualKeyword._checks);
+ if (eat(tt.parenL)) {
+ parseExpression();
+ expect(tt.parenR);
+ }
+}
+
+function flowParseTypeAndPredicateInitialiser() {
+ const oldIsType = pushTypeContext(0);
+ expect(tt.colon);
+ if (match(tt.modulo)) {
+ flowParsePredicate();
+ } else {
+ flowParseType();
+ if (match(tt.modulo)) {
+ flowParsePredicate();
+ }
+ }
+ popTypeContext(oldIsType);
+}
+
+function flowParseDeclareClass() {
+ next();
+ flowParseInterfaceish(/* isClass */ true);
+}
+
+function flowParseDeclareFunction() {
+ next();
+ parseIdentifier();
+
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ expect(tt.parenL);
+ flowParseFunctionTypeParams();
+ expect(tt.parenR);
+
+ flowParseTypeAndPredicateInitialiser();
+
+ semicolon();
+}
+
+function flowParseDeclare() {
+ if (match(tt._class)) {
+ flowParseDeclareClass();
+ } else if (match(tt._function)) {
+ flowParseDeclareFunction();
+ } else if (match(tt._var)) {
+ flowParseDeclareVariable();
+ } else if (eatContextual(ContextualKeyword._module)) {
+ if (eat(tt.dot)) {
+ flowParseDeclareModuleExports();
+ } else {
+ flowParseDeclareModule();
+ }
+ } else if (isContextual(ContextualKeyword._type)) {
+ flowParseDeclareTypeAlias();
+ } else if (isContextual(ContextualKeyword._opaque)) {
+ flowParseDeclareOpaqueType();
+ } else if (isContextual(ContextualKeyword._interface)) {
+ flowParseDeclareInterface();
+ } else if (match(tt._export)) {
+ flowParseDeclareExportDeclaration();
+ } else {
+ unexpected();
+ }
+}
+
+function flowParseDeclareVariable() {
+ next();
+ flowParseTypeAnnotatableIdentifier();
+ semicolon();
+}
+
+function flowParseDeclareModule() {
+ if (match(tt.string)) {
+ parseExprAtom();
+ } else {
+ parseIdentifier();
+ }
+
+ expect(tt.braceL);
+ while (!match(tt.braceR) && !state.error) {
+ if (match(tt._import)) {
+ next();
+ parseImport();
+ } else {
+ unexpected();
+ }
+ }
+ expect(tt.braceR);
+}
+
+function flowParseDeclareExportDeclaration() {
+ expect(tt._export);
+
+ if (eat(tt._default)) {
+ if (match(tt._function) || match(tt._class)) {
+ // declare export default class ...
+ // declare export default function ...
+ flowParseDeclare();
+ } else {
+ // declare export default [type];
+ flowParseType();
+ semicolon();
+ }
+ } else if (
+ match(tt._var) || // declare export var ...
+ match(tt._function) || // declare export function ...
+ match(tt._class) || // declare export class ...
+ isContextual(ContextualKeyword._opaque) // declare export opaque ..
+ ) {
+ flowParseDeclare();
+ } else if (
+ match(tt.star) || // declare export * from ''
+ match(tt.braceL) || // declare export {} ...
+ isContextual(ContextualKeyword._interface) || // declare export interface ...
+ isContextual(ContextualKeyword._type) || // declare export type ...
+ isContextual(ContextualKeyword._opaque) // declare export opaque type ...
+ ) {
+ parseExport();
+ } else {
+ unexpected();
+ }
+}
+
+function flowParseDeclareModuleExports() {
+ expectContextual(ContextualKeyword._exports);
+ flowParseTypeAnnotation();
+ semicolon();
+}
+
+function flowParseDeclareTypeAlias() {
+ next();
+ flowParseTypeAlias();
+}
+
+function flowParseDeclareOpaqueType() {
+ next();
+ flowParseOpaqueType(true);
+}
+
+function flowParseDeclareInterface() {
+ next();
+ flowParseInterfaceish();
+}
+
+// Interfaces
+
+function flowParseInterfaceish(isClass = false) {
+ flowParseRestrictedIdentifier();
+
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ if (eat(tt._extends)) {
+ do {
+ flowParseInterfaceExtends();
+ } while (!isClass && eat(tt.comma));
+ }
+
+ if (isContextual(ContextualKeyword._mixins)) {
+ next();
+ do {
+ flowParseInterfaceExtends();
+ } while (eat(tt.comma));
+ }
+
+ if (isContextual(ContextualKeyword._implements)) {
+ next();
+ do {
+ flowParseInterfaceExtends();
+ } while (eat(tt.comma));
+ }
+
+ flowParseObjectType(isClass, false, isClass);
+}
+
+function flowParseInterfaceExtends() {
+ flowParseQualifiedTypeIdentifier(false);
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterInstantiation();
+ }
+}
+
+function flowParseInterface() {
+ flowParseInterfaceish();
+}
+
+function flowParseRestrictedIdentifier() {
+ parseIdentifier();
+}
+
+function flowParseTypeAlias() {
+ flowParseRestrictedIdentifier();
+
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ flowParseTypeInitialiser(tt.eq);
+ semicolon();
+}
+
+function flowParseOpaqueType(declare) {
+ expectContextual(ContextualKeyword._type);
+ flowParseRestrictedIdentifier();
+
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ // Parse the supertype
+ if (match(tt.colon)) {
+ flowParseTypeInitialiser(tt.colon);
+ }
+
+ if (!declare) {
+ flowParseTypeInitialiser(tt.eq);
+ }
+ semicolon();
+}
+
+function flowParseTypeParameter() {
+ flowParseVariance();
+ flowParseTypeAnnotatableIdentifier();
+
+ if (eat(tt.eq)) {
+ flowParseType();
+ }
+}
+
+export function flowParseTypeParameterDeclaration() {
+ const oldIsType = pushTypeContext(0);
+ // istanbul ignore else: this condition is already checked at all call sites
+ if (match(tt.lessThan) || match(tt.typeParameterStart)) {
+ next();
+ } else {
+ unexpected();
+ }
+
+ do {
+ flowParseTypeParameter();
+ if (!match(tt.greaterThan)) {
+ expect(tt.comma);
+ }
+ } while (!match(tt.greaterThan) && !state.error);
+ expect(tt.greaterThan);
+ popTypeContext(oldIsType);
+}
+
+function flowParseTypeParameterInstantiation() {
+ const oldIsType = pushTypeContext(0);
+ expect(tt.lessThan);
+ while (!match(tt.greaterThan) && !state.error) {
+ flowParseType();
+ if (!match(tt.greaterThan)) {
+ expect(tt.comma);
+ }
+ }
+ expect(tt.greaterThan);
+ popTypeContext(oldIsType);
+}
+
+function flowParseInterfaceType() {
+ expectContextual(ContextualKeyword._interface);
+ if (eat(tt._extends)) {
+ do {
+ flowParseInterfaceExtends();
+ } while (eat(tt.comma));
+ }
+ flowParseObjectType(false, false, false);
+}
+
+function flowParseObjectPropertyKey() {
+ if (match(tt.num) || match(tt.string)) {
+ parseExprAtom();
+ } else {
+ parseIdentifier();
+ }
+}
+
+function flowParseObjectTypeIndexer() {
+ // Note: bracketL has already been consumed
+ if (lookaheadType() === tt.colon) {
+ flowParseObjectPropertyKey();
+ flowParseTypeInitialiser();
+ } else {
+ flowParseType();
+ }
+ expect(tt.bracketR);
+ flowParseTypeInitialiser();
+}
+
+function flowParseObjectTypeInternalSlot() {
+ // Note: both bracketL have already been consumed
+ flowParseObjectPropertyKey();
+ expect(tt.bracketR);
+ expect(tt.bracketR);
+ if (match(tt.lessThan) || match(tt.parenL)) {
+ flowParseObjectTypeMethodish();
+ } else {
+ eat(tt.question);
+ flowParseTypeInitialiser();
+ }
+}
+
+function flowParseObjectTypeMethodish() {
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ }
+
+ expect(tt.parenL);
+ while (!match(tt.parenR) && !match(tt.ellipsis) && !state.error) {
+ flowParseFunctionTypeParam();
+ if (!match(tt.parenR)) {
+ expect(tt.comma);
+ }
+ }
+
+ if (eat(tt.ellipsis)) {
+ flowParseFunctionTypeParam();
+ }
+ expect(tt.parenR);
+ flowParseTypeInitialiser();
+}
+
+function flowParseObjectTypeCallProperty() {
+ flowParseObjectTypeMethodish();
+}
+
+function flowParseObjectType(allowStatic, allowExact, allowProto) {
+ let endDelim;
+ if (allowExact && match(tt.braceBarL)) {
+ expect(tt.braceBarL);
+ endDelim = tt.braceBarR;
+ } else {
+ expect(tt.braceL);
+ endDelim = tt.braceR;
+ }
+
+ while (!match(endDelim) && !state.error) {
+ if (allowProto && isContextual(ContextualKeyword._proto)) {
+ const lookahead = lookaheadType();
+ if (lookahead !== tt.colon && lookahead !== tt.question) {
+ next();
+ allowStatic = false;
+ }
+ }
+ if (allowStatic && isContextual(ContextualKeyword._static)) {
+ const lookahead = lookaheadType();
+ if (lookahead !== tt.colon && lookahead !== tt.question) {
+ next();
+ }
+ }
+
+ flowParseVariance();
+
+ if (eat(tt.bracketL)) {
+ if (eat(tt.bracketL)) {
+ flowParseObjectTypeInternalSlot();
+ } else {
+ flowParseObjectTypeIndexer();
+ }
+ } else if (match(tt.parenL) || match(tt.lessThan)) {
+ flowParseObjectTypeCallProperty();
+ } else {
+ if (isContextual(ContextualKeyword._get) || isContextual(ContextualKeyword._set)) {
+ const lookahead = lookaheadType();
+ if (lookahead === tt.name || lookahead === tt.string || lookahead === tt.num) {
+ next();
+ }
+ }
+
+ flowParseObjectTypeProperty();
+ }
+
+ flowObjectTypeSemicolon();
+ }
+
+ expect(endDelim);
+}
+
+function flowParseObjectTypeProperty() {
+ if (match(tt.ellipsis)) {
+ expect(tt.ellipsis);
+ if (!eat(tt.comma)) {
+ eat(tt.semi);
+ }
+ // Explicit inexact object syntax.
+ if (match(tt.braceR)) {
+ return;
+ }
+ flowParseType();
+ } else {
+ flowParseObjectPropertyKey();
+ if (match(tt.lessThan) || match(tt.parenL)) {
+ // This is a method property
+ flowParseObjectTypeMethodish();
+ } else {
+ eat(tt.question);
+ flowParseTypeInitialiser();
+ }
+ }
+}
+
+function flowObjectTypeSemicolon() {
+ if (!eat(tt.semi) && !eat(tt.comma) && !match(tt.braceR) && !match(tt.braceBarR)) {
+ unexpected();
+ }
+}
+
+function flowParseQualifiedTypeIdentifier(initialIdAlreadyParsed) {
+ if (!initialIdAlreadyParsed) {
+ parseIdentifier();
+ }
+ while (eat(tt.dot)) {
+ parseIdentifier();
+ }
+}
+
+function flowParseGenericType() {
+ flowParseQualifiedTypeIdentifier(true);
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterInstantiation();
+ }
+}
+
+function flowParseTypeofType() {
+ expect(tt._typeof);
+ flowParsePrimaryType();
+}
+
+function flowParseTupleType() {
+ expect(tt.bracketL);
+ // We allow trailing commas
+ while (state.pos < input.length && !match(tt.bracketR)) {
+ flowParseType();
+ if (match(tt.bracketR)) {
+ break;
+ }
+ expect(tt.comma);
+ }
+ expect(tt.bracketR);
+}
+
+function flowParseFunctionTypeParam() {
+ const lookahead = lookaheadType();
+ if (lookahead === tt.colon || lookahead === tt.question) {
+ parseIdentifier();
+ eat(tt.question);
+ flowParseTypeInitialiser();
+ } else {
+ flowParseType();
+ }
+}
+
+function flowParseFunctionTypeParams() {
+ while (!match(tt.parenR) && !match(tt.ellipsis) && !state.error) {
+ flowParseFunctionTypeParam();
+ if (!match(tt.parenR)) {
+ expect(tt.comma);
+ }
+ }
+ if (eat(tt.ellipsis)) {
+ flowParseFunctionTypeParam();
+ }
+}
+
+// The parsing of types roughly parallels the parsing of expressions, and
+// primary types are kind of like primary expressions...they're the
+// primitives with which other types are constructed.
+function flowParsePrimaryType() {
+ let isGroupedType = false;
+ const oldNoAnonFunctionType = state.noAnonFunctionType;
+
+ switch (state.type) {
+ case tt.name: {
+ if (isContextual(ContextualKeyword._interface)) {
+ flowParseInterfaceType();
+ return;
+ }
+ parseIdentifier();
+ flowParseGenericType();
+ return;
+ }
+
+ case tt.braceL:
+ flowParseObjectType(false, false, false);
+ return;
+
+ case tt.braceBarL:
+ flowParseObjectType(false, true, false);
+ return;
+
+ case tt.bracketL:
+ flowParseTupleType();
+ return;
+
+ case tt.lessThan:
+ flowParseTypeParameterDeclaration();
+ expect(tt.parenL);
+ flowParseFunctionTypeParams();
+ expect(tt.parenR);
+ expect(tt.arrow);
+ flowParseType();
+ return;
+
+ case tt.parenL:
+ next();
+
+ // Check to see if this is actually a grouped type
+ if (!match(tt.parenR) && !match(tt.ellipsis)) {
+ if (match(tt.name)) {
+ const token = lookaheadType();
+ isGroupedType = token !== tt.question && token !== tt.colon;
+ } else {
+ isGroupedType = true;
+ }
+ }
+
+ if (isGroupedType) {
+ state.noAnonFunctionType = false;
+ flowParseType();
+ state.noAnonFunctionType = oldNoAnonFunctionType;
+
+ // A `,` or a `) =>` means this is an anonymous function type
+ if (
+ state.noAnonFunctionType ||
+ !(match(tt.comma) || (match(tt.parenR) && lookaheadType() === tt.arrow))
+ ) {
+ expect(tt.parenR);
+ return;
+ } else {
+ // Eat a comma if there is one
+ eat(tt.comma);
+ }
+ }
+
+ flowParseFunctionTypeParams();
+
+ expect(tt.parenR);
+ expect(tt.arrow);
+ flowParseType();
+ return;
+
+ case tt.minus:
+ next();
+ parseLiteral();
+ return;
+
+ case tt.string:
+ case tt.num:
+ case tt._true:
+ case tt._false:
+ case tt._null:
+ case tt._this:
+ case tt._void:
+ case tt.star:
+ next();
+ return;
+
+ default:
+ if (state.type === tt._typeof) {
+ flowParseTypeofType();
+ return;
+ } else if (state.type & TokenType.IS_KEYWORD) {
+ next();
+ state.tokens[state.tokens.length - 1].type = tt.name;
+ return;
+ }
+ }
+
+ unexpected();
+}
+
+function flowParsePostfixType() {
+ flowParsePrimaryType();
+ while (!canInsertSemicolon() && (match(tt.bracketL) || match(tt.questionDot))) {
+ eat(tt.questionDot);
+ expect(tt.bracketL);
+ if (eat(tt.bracketR)) {
+ // Array type
+ } else {
+ // Indexed access type
+ flowParseType();
+ expect(tt.bracketR);
+ }
+ }
+}
+
+function flowParsePrefixType() {
+ if (eat(tt.question)) {
+ flowParsePrefixType();
+ } else {
+ flowParsePostfixType();
+ }
+}
+
+function flowParseAnonFunctionWithoutParens() {
+ flowParsePrefixType();
+ if (!state.noAnonFunctionType && eat(tt.arrow)) {
+ flowParseType();
+ }
+}
+
+function flowParseIntersectionType() {
+ eat(tt.bitwiseAND);
+ flowParseAnonFunctionWithoutParens();
+ while (eat(tt.bitwiseAND)) {
+ flowParseAnonFunctionWithoutParens();
+ }
+}
+
+function flowParseUnionType() {
+ eat(tt.bitwiseOR);
+ flowParseIntersectionType();
+ while (eat(tt.bitwiseOR)) {
+ flowParseIntersectionType();
+ }
+}
+
+function flowParseType() {
+ flowParseUnionType();
+}
+
+export function flowParseTypeAnnotation() {
+ flowParseTypeInitialiser();
+}
+
+function flowParseTypeAnnotatableIdentifier() {
+ parseIdentifier();
+ if (match(tt.colon)) {
+ flowParseTypeAnnotation();
+ }
+}
+
+export function flowParseVariance() {
+ if (match(tt.plus) || match(tt.minus)) {
+ next();
+ state.tokens[state.tokens.length - 1].isType = true;
+ }
+}
+
+// ==================================
+// Overrides
+// ==================================
+
+export function flowParseFunctionBodyAndFinish(funcContextId) {
+ // For arrow functions, `parseArrow` handles the return type itself.
+ if (match(tt.colon)) {
+ flowParseTypeAndPredicateInitialiser();
+ }
+
+ parseFunctionBody(false, funcContextId);
+}
+
+export function flowParseSubscript(
+ startTokenIndex,
+ noCalls,
+ stopState,
+) {
+ if (match(tt.questionDot) && lookaheadType() === tt.lessThan) {
+ if (noCalls) {
+ stopState.stop = true;
+ return;
+ }
+ next();
+ flowParseTypeParameterInstantiation();
+ expect(tt.parenL);
+ parseCallExpressionArguments();
+ return;
+ } else if (!noCalls && match(tt.lessThan)) {
+ const snapshot = state.snapshot();
+ flowParseTypeParameterInstantiation();
+ expect(tt.parenL);
+ parseCallExpressionArguments();
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ } else {
+ return;
+ }
+ }
+ baseParseSubscript(startTokenIndex, noCalls, stopState);
+}
+
+export function flowStartParseNewArguments() {
+ if (match(tt.lessThan)) {
+ const snapshot = state.snapshot();
+ flowParseTypeParameterInstantiation();
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ }
+ }
+}
+
+// interfaces
+export function flowTryParseStatement() {
+ if (match(tt.name) && state.contextualKeyword === ContextualKeyword._interface) {
+ const oldIsType = pushTypeContext(0);
+ next();
+ flowParseInterface();
+ popTypeContext(oldIsType);
+ return true;
+ } else if (isContextual(ContextualKeyword._enum)) {
+ flowParseEnumDeclaration();
+ return true;
+ }
+ return false;
+}
+
+export function flowTryParseExportDefaultExpression() {
+ if (isContextual(ContextualKeyword._enum)) {
+ flowParseEnumDeclaration();
+ return true;
+ }
+ return false;
+}
+
+// declares, interfaces and type aliases
+export function flowParseIdentifierStatement(contextualKeyword) {
+ if (contextualKeyword === ContextualKeyword._declare) {
+ if (
+ match(tt._class) ||
+ match(tt.name) ||
+ match(tt._function) ||
+ match(tt._var) ||
+ match(tt._export)
+ ) {
+ const oldIsType = pushTypeContext(1);
+ flowParseDeclare();
+ popTypeContext(oldIsType);
+ }
+ } else if (match(tt.name)) {
+ if (contextualKeyword === ContextualKeyword._interface) {
+ const oldIsType = pushTypeContext(1);
+ flowParseInterface();
+ popTypeContext(oldIsType);
+ } else if (contextualKeyword === ContextualKeyword._type) {
+ const oldIsType = pushTypeContext(1);
+ flowParseTypeAlias();
+ popTypeContext(oldIsType);
+ } else if (contextualKeyword === ContextualKeyword._opaque) {
+ const oldIsType = pushTypeContext(1);
+ flowParseOpaqueType(false);
+ popTypeContext(oldIsType);
+ }
+ }
+ semicolon();
+}
+
+// export type
+export function flowShouldParseExportDeclaration() {
+ return (
+ isContextual(ContextualKeyword._type) ||
+ isContextual(ContextualKeyword._interface) ||
+ isContextual(ContextualKeyword._opaque) ||
+ isContextual(ContextualKeyword._enum)
+ );
+}
+
+export function flowShouldDisallowExportDefaultSpecifier() {
+ return (
+ match(tt.name) &&
+ (state.contextualKeyword === ContextualKeyword._type ||
+ state.contextualKeyword === ContextualKeyword._interface ||
+ state.contextualKeyword === ContextualKeyword._opaque ||
+ state.contextualKeyword === ContextualKeyword._enum)
+ );
+}
+
+export function flowParseExportDeclaration() {
+ if (isContextual(ContextualKeyword._type)) {
+ const oldIsType = pushTypeContext(1);
+ next();
+
+ if (match(tt.braceL)) {
+ // export type { foo, bar };
+ parseExportSpecifiers();
+ parseExportFrom();
+ } else {
+ // export type Foo = Bar;
+ flowParseTypeAlias();
+ }
+ popTypeContext(oldIsType);
+ } else if (isContextual(ContextualKeyword._opaque)) {
+ const oldIsType = pushTypeContext(1);
+ next();
+ // export opaque type Foo = Bar;
+ flowParseOpaqueType(false);
+ popTypeContext(oldIsType);
+ } else if (isContextual(ContextualKeyword._interface)) {
+ const oldIsType = pushTypeContext(1);
+ next();
+ flowParseInterface();
+ popTypeContext(oldIsType);
+ } else {
+ parseStatement(true);
+ }
+}
+
+export function flowShouldParseExportStar() {
+ return match(tt.star) || (isContextual(ContextualKeyword._type) && lookaheadType() === tt.star);
+}
+
+export function flowParseExportStar() {
+ if (eatContextual(ContextualKeyword._type)) {
+ const oldIsType = pushTypeContext(2);
+ baseParseExportStar();
+ popTypeContext(oldIsType);
+ } else {
+ baseParseExportStar();
+ }
+}
+
+// parse a the super class type parameters and implements
+export function flowAfterParseClassSuper(hasSuper) {
+ if (hasSuper && match(tt.lessThan)) {
+ flowParseTypeParameterInstantiation();
+ }
+ if (isContextual(ContextualKeyword._implements)) {
+ const oldIsType = pushTypeContext(0);
+ next();
+ state.tokens[state.tokens.length - 1].type = tt._implements;
+ do {
+ flowParseRestrictedIdentifier();
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterInstantiation();
+ }
+ } while (eat(tt.comma));
+ popTypeContext(oldIsType);
+ }
+}
+
+// parse type parameters for object method shorthand
+export function flowStartParseObjPropValue() {
+ // method shorthand
+ if (match(tt.lessThan)) {
+ flowParseTypeParameterDeclaration();
+ if (!match(tt.parenL)) unexpected();
+ }
+}
+
+export function flowParseAssignableListItemTypes() {
+ const oldIsType = pushTypeContext(0);
+ eat(tt.question);
+ if (match(tt.colon)) {
+ flowParseTypeAnnotation();
+ }
+ popTypeContext(oldIsType);
+}
+
+// parse typeof and type imports
+export function flowStartParseImportSpecifiers() {
+ if (match(tt._typeof) || isContextual(ContextualKeyword._type)) {
+ const lh = lookaheadTypeAndKeyword();
+ if (isMaybeDefaultImport(lh) || lh.type === tt.braceL || lh.type === tt.star) {
+ next();
+ }
+ }
+}
+
+// parse import-type/typeof shorthand
+export function flowParseImportSpecifier() {
+ const isTypeKeyword =
+ state.contextualKeyword === ContextualKeyword._type || state.type === tt._typeof;
+ if (isTypeKeyword) {
+ next();
+ } else {
+ parseIdentifier();
+ }
+
+ if (isContextual(ContextualKeyword._as) && !isLookaheadContextual(ContextualKeyword._as)) {
+ parseIdentifier();
+ if (isTypeKeyword && !match(tt.name) && !(state.type & TokenType.IS_KEYWORD)) {
+ // `import {type as ,` or `import {type as }`
+ } else {
+ // `import {type as foo`
+ parseIdentifier();
+ }
+ } else {
+ if (isTypeKeyword && (match(tt.name) || !!(state.type & TokenType.IS_KEYWORD))) {
+ // `import {type foo`
+ parseIdentifier();
+ }
+ if (eatContextual(ContextualKeyword._as)) {
+ parseIdentifier();
+ }
+ }
+}
+
+// parse function type parameters - function foo<T>() {}
+export function flowStartParseFunctionParams() {
+ // Originally this checked if the method is a getter/setter, but if it was, we'd crash soon
+ // anyway, so don't try to propagate that information.
+ if (match(tt.lessThan)) {
+ const oldIsType = pushTypeContext(0);
+ flowParseTypeParameterDeclaration();
+ popTypeContext(oldIsType);
+ }
+}
+
+// parse flow type annotations on variable declarator heads - let foo: string = bar
+export function flowAfterParseVarHead() {
+ if (match(tt.colon)) {
+ flowParseTypeAnnotation();
+ }
+}
+
+// parse the return type of an async arrow function - let foo = (async (): number => {});
+export function flowStartParseAsyncArrowFromCallExpression() {
+ if (match(tt.colon)) {
+ const oldNoAnonFunctionType = state.noAnonFunctionType;
+ state.noAnonFunctionType = true;
+ flowParseTypeAnnotation();
+ state.noAnonFunctionType = oldNoAnonFunctionType;
+ }
+}
+
+// We need to support type parameter declarations for arrow functions. This
+// is tricky. There are three situations we need to handle
+//
+// 1. This is either JSX or an arrow function. We'll try JSX first. If that
+// fails, we'll try an arrow function. If that fails, we'll throw the JSX
+// error.
+// 2. This is an arrow function. We'll parse the type parameter declaration,
+// parse the rest, make sure the rest is an arrow function, and go from
+// there
+// 3. This is neither. Just call the super method
+export function flowParseMaybeAssign(noIn, isWithinParens) {
+ if (match(tt.lessThan)) {
+ const snapshot = state.snapshot();
+ let wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ state.type = tt.typeParameterStart;
+ } else {
+ return wasArrow;
+ }
+
+ const oldIsType = pushTypeContext(0);
+ flowParseTypeParameterDeclaration();
+ popTypeContext(oldIsType);
+ wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (wasArrow) {
+ return true;
+ }
+ unexpected();
+ }
+
+ return baseParseMaybeAssign(noIn, isWithinParens);
+}
+
+// handle return types for arrow functions
+export function flowParseArrow() {
+ if (match(tt.colon)) {
+ const oldIsType = pushTypeContext(0);
+ const snapshot = state.snapshot();
+
+ const oldNoAnonFunctionType = state.noAnonFunctionType;
+ state.noAnonFunctionType = true;
+ flowParseTypeAndPredicateInitialiser();
+ state.noAnonFunctionType = oldNoAnonFunctionType;
+
+ if (canInsertSemicolon()) unexpected();
+ if (!match(tt.arrow)) unexpected();
+
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ }
+ popTypeContext(oldIsType);
+ }
+ return eat(tt.arrow);
+}
+
+export function flowParseSubscripts(startTokenIndex, noCalls = false) {
+ if (
+ state.tokens[state.tokens.length - 1].contextualKeyword === ContextualKeyword._async &&
+ match(tt.lessThan)
+ ) {
+ const snapshot = state.snapshot();
+ const wasArrow = parseAsyncArrowWithTypeParameters();
+ if (wasArrow && !state.error) {
+ return;
+ }
+ state.restoreFromSnapshot(snapshot);
+ }
+
+ baseParseSubscripts(startTokenIndex, noCalls);
+}
+
+// Returns true if there was an arrow function here.
+function parseAsyncArrowWithTypeParameters() {
+ state.scopeDepth++;
+ const startTokenIndex = state.tokens.length;
+ parseFunctionParams();
+ if (!parseArrow()) {
+ return false;
+ }
+ parseArrowExpression(startTokenIndex);
+ return true;
+}
+
+function flowParseEnumDeclaration() {
+ expectContextual(ContextualKeyword._enum);
+ state.tokens[state.tokens.length - 1].type = tt._enum;
+ parseIdentifier();
+ flowParseEnumBody();
+}
+
+function flowParseEnumBody() {
+ if (eatContextual(ContextualKeyword._of)) {
+ next();
+ }
+ expect(tt.braceL);
+ flowParseEnumMembers();
+ expect(tt.braceR);
+}
+
+function flowParseEnumMembers() {
+ while (!match(tt.braceR) && !state.error) {
+ if (eat(tt.ellipsis)) {
+ break;
+ }
+ flowParseEnumMember();
+ if (!match(tt.braceR)) {
+ expect(tt.comma);
+ }
+ }
+}
+
+function flowParseEnumMember() {
+ parseIdentifier();
+ if (eat(tt.eq)) {
+ // Flow enum values are always just one token (a string, number, or boolean literal).
+ next();
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/jsx/index.js b/node_modules/sucrase/dist/esm/parser/plugins/jsx/index.js
new file mode 100644
index 0000000..83f3983
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/jsx/index.js
@@ -0,0 +1,367 @@
+import {
+ eat,
+ finishToken,
+ getTokenFromCode,
+ IdentifierRole,
+ JSXRole,
+ match,
+ next,
+ skipSpace,
+ Token,
+} from "../../tokenizer/index";
+import {TokenType as tt} from "../../tokenizer/types";
+import {input, isTypeScriptEnabled, state} from "../../traverser/base";
+import {parseExpression, parseMaybeAssign} from "../../traverser/expression";
+import {expect, unexpected} from "../../traverser/util";
+import {charCodes} from "../../util/charcodes";
+import {IS_IDENTIFIER_CHAR, IS_IDENTIFIER_START} from "../../util/identifier";
+import {tsTryParseJSXTypeArgument} from "../typescript";
+
+/**
+ * Read token with JSX contents.
+ *
+ * In addition to detecting jsxTagStart and also regular tokens that might be
+ * part of an expression, this code detects the start and end of text ranges
+ * within JSX children. In order to properly count the number of children, we
+ * distinguish jsxText from jsxEmptyText, which is a text range that simplifies
+ * to the empty string after JSX whitespace trimming.
+ *
+ * It turns out that a JSX text range will simplify to the empty string if and
+ * only if both of these conditions hold:
+ * - The range consists entirely of whitespace characters (only counting space,
+ * tab, \r, and \n).
+ * - The range has at least one newline.
+ * This can be proven by analyzing any implementation of whitespace trimming,
+ * e.g. formatJSXTextLiteral in Sucrase or cleanJSXElementLiteralChild in Babel.
+ */
+function jsxReadToken() {
+ let sawNewline = false;
+ let sawNonWhitespace = false;
+ while (true) {
+ if (state.pos >= input.length) {
+ unexpected("Unterminated JSX contents");
+ return;
+ }
+
+ const ch = input.charCodeAt(state.pos);
+ if (ch === charCodes.lessThan || ch === charCodes.leftCurlyBrace) {
+ if (state.pos === state.start) {
+ if (ch === charCodes.lessThan) {
+ state.pos++;
+ finishToken(tt.jsxTagStart);
+ return;
+ }
+ getTokenFromCode(ch);
+ return;
+ }
+ if (sawNewline && !sawNonWhitespace) {
+ finishToken(tt.jsxEmptyText);
+ } else {
+ finishToken(tt.jsxText);
+ }
+ return;
+ }
+
+ // This is part of JSX text.
+ if (ch === charCodes.lineFeed) {
+ sawNewline = true;
+ } else if (ch !== charCodes.space && ch !== charCodes.carriageReturn && ch !== charCodes.tab) {
+ sawNonWhitespace = true;
+ }
+ state.pos++;
+ }
+}
+
+function jsxReadString(quote) {
+ state.pos++;
+ for (;;) {
+ if (state.pos >= input.length) {
+ unexpected("Unterminated string constant");
+ return;
+ }
+
+ const ch = input.charCodeAt(state.pos);
+ if (ch === quote) {
+ state.pos++;
+ break;
+ }
+ state.pos++;
+ }
+ finishToken(tt.string);
+}
+
+// Read a JSX identifier (valid tag or attribute name).
+//
+// Optimized version since JSX identifiers can't contain
+// escape characters and so can be read as single slice.
+// Also assumes that first character was already checked
+// by isIdentifierStart in readToken.
+
+function jsxReadWord() {
+ let ch;
+ do {
+ if (state.pos > input.length) {
+ unexpected("Unexpectedly reached the end of input.");
+ return;
+ }
+ ch = input.charCodeAt(++state.pos);
+ } while (IS_IDENTIFIER_CHAR[ch] || ch === charCodes.dash);
+ finishToken(tt.jsxName);
+}
+
+// Parse next token as JSX identifier
+function jsxParseIdentifier() {
+ nextJSXTagToken();
+}
+
+// Parse namespaced identifier.
+function jsxParseNamespacedName(identifierRole) {
+ jsxParseIdentifier();
+ if (!eat(tt.colon)) {
+ // Plain identifier, so this is an access.
+ state.tokens[state.tokens.length - 1].identifierRole = identifierRole;
+ return;
+ }
+ // Process the second half of the namespaced name.
+ jsxParseIdentifier();
+}
+
+// Parses element name in any form - namespaced, member
+// or single identifier.
+function jsxParseElementName() {
+ const firstTokenIndex = state.tokens.length;
+ jsxParseNamespacedName(IdentifierRole.Access);
+ let hadDot = false;
+ while (match(tt.dot)) {
+ hadDot = true;
+ nextJSXTagToken();
+ jsxParseIdentifier();
+ }
+ // For tags like <div> with a lowercase letter and no dots, the name is
+ // actually *not* an identifier access, since it's referring to a built-in
+ // tag name. Remove the identifier role in this case so that it's not
+ // accidentally transformed by the imports transform when preserving JSX.
+ if (!hadDot) {
+ const firstToken = state.tokens[firstTokenIndex];
+ const firstChar = input.charCodeAt(firstToken.start);
+ if (firstChar >= charCodes.lowercaseA && firstChar <= charCodes.lowercaseZ) {
+ firstToken.identifierRole = null;
+ }
+ }
+}
+
+// Parses any type of JSX attribute value.
+function jsxParseAttributeValue() {
+ switch (state.type) {
+ case tt.braceL:
+ next();
+ parseExpression();
+ nextJSXTagToken();
+ return;
+
+ case tt.jsxTagStart:
+ jsxParseElement();
+ nextJSXTagToken();
+ return;
+
+ case tt.string:
+ nextJSXTagToken();
+ return;
+
+ default:
+ unexpected("JSX value should be either an expression or a quoted JSX text");
+ }
+}
+
+// Parse JSX spread child, after already processing the {
+// Does not parse the closing }
+function jsxParseSpreadChild() {
+ expect(tt.ellipsis);
+ parseExpression();
+}
+
+// Parses JSX opening tag starting after "<".
+// Returns true if the tag was self-closing.
+// Does not parse the last token.
+function jsxParseOpeningElement(initialTokenIndex) {
+ if (match(tt.jsxTagEnd)) {
+ // This is an open-fragment.
+ return false;
+ }
+ jsxParseElementName();
+ if (isTypeScriptEnabled) {
+ tsTryParseJSXTypeArgument();
+ }
+ let hasSeenPropSpread = false;
+ while (!match(tt.slash) && !match(tt.jsxTagEnd) && !state.error) {
+ if (eat(tt.braceL)) {
+ hasSeenPropSpread = true;
+ expect(tt.ellipsis);
+ parseMaybeAssign();
+ // }
+ nextJSXTagToken();
+ continue;
+ }
+ if (
+ hasSeenPropSpread &&
+ state.end - state.start === 3 &&
+ input.charCodeAt(state.start) === charCodes.lowercaseK &&
+ input.charCodeAt(state.start + 1) === charCodes.lowercaseE &&
+ input.charCodeAt(state.start + 2) === charCodes.lowercaseY
+ ) {
+ state.tokens[initialTokenIndex].jsxRole = JSXRole.KeyAfterPropSpread;
+ }
+ jsxParseNamespacedName(IdentifierRole.ObjectKey);
+ if (match(tt.eq)) {
+ nextJSXTagToken();
+ jsxParseAttributeValue();
+ }
+ }
+ const isSelfClosing = match(tt.slash);
+ if (isSelfClosing) {
+ // /
+ nextJSXTagToken();
+ }
+ return isSelfClosing;
+}
+
+// Parses JSX closing tag starting after "</".
+// Does not parse the last token.
+function jsxParseClosingElement() {
+ if (match(tt.jsxTagEnd)) {
+ // Fragment syntax, so we immediately have a tag end.
+ return;
+ }
+ jsxParseElementName();
+}
+
+// Parses entire JSX element, including its opening tag
+// (starting after "<"), attributes, contents and closing tag.
+// Does not parse the last token.
+function jsxParseElementAt() {
+ const initialTokenIndex = state.tokens.length - 1;
+ state.tokens[initialTokenIndex].jsxRole = JSXRole.NoChildren;
+ let numExplicitChildren = 0;
+ const isSelfClosing = jsxParseOpeningElement(initialTokenIndex);
+ if (!isSelfClosing) {
+ nextJSXExprToken();
+ while (true) {
+ switch (state.type) {
+ case tt.jsxTagStart:
+ nextJSXTagToken();
+ if (match(tt.slash)) {
+ nextJSXTagToken();
+ jsxParseClosingElement();
+ // Key after prop spread takes precedence over number of children,
+ // since it means we switch to createElement, which doesn't care
+ // about number of children.
+ if (state.tokens[initialTokenIndex].jsxRole !== JSXRole.KeyAfterPropSpread) {
+ if (numExplicitChildren === 1) {
+ state.tokens[initialTokenIndex].jsxRole = JSXRole.OneChild;
+ } else if (numExplicitChildren > 1) {
+ state.tokens[initialTokenIndex].jsxRole = JSXRole.StaticChildren;
+ }
+ }
+ return;
+ }
+ numExplicitChildren++;
+ jsxParseElementAt();
+ nextJSXExprToken();
+ break;
+
+ case tt.jsxText:
+ numExplicitChildren++;
+ nextJSXExprToken();
+ break;
+
+ case tt.jsxEmptyText:
+ nextJSXExprToken();
+ break;
+
+ case tt.braceL:
+ next();
+ if (match(tt.ellipsis)) {
+ jsxParseSpreadChild();
+ nextJSXExprToken();
+ // Spread children are a mechanism to explicitly mark children as
+ // static, so count it as 2 children to satisfy the "more than one
+ // child" condition.
+ numExplicitChildren += 2;
+ } else {
+ // If we see {}, this is an empty pseudo-expression that doesn't
+ // count as a child.
+ if (!match(tt.braceR)) {
+ numExplicitChildren++;
+ parseExpression();
+ }
+ nextJSXExprToken();
+ }
+
+ break;
+
+ // istanbul ignore next - should never happen
+ default:
+ unexpected();
+ return;
+ }
+ }
+ }
+}
+
+// Parses entire JSX element from current position.
+// Does not parse the last token.
+export function jsxParseElement() {
+ nextJSXTagToken();
+ jsxParseElementAt();
+}
+
+// ==================================
+// Overrides
+// ==================================
+
+export function nextJSXTagToken() {
+ state.tokens.push(new Token());
+ skipSpace();
+ state.start = state.pos;
+ const code = input.charCodeAt(state.pos);
+
+ if (IS_IDENTIFIER_START[code]) {
+ jsxReadWord();
+ } else if (code === charCodes.quotationMark || code === charCodes.apostrophe) {
+ jsxReadString(code);
+ } else {
+ // The following tokens are just one character each.
+ ++state.pos;
+ switch (code) {
+ case charCodes.greaterThan:
+ finishToken(tt.jsxTagEnd);
+ break;
+ case charCodes.lessThan:
+ finishToken(tt.jsxTagStart);
+ break;
+ case charCodes.slash:
+ finishToken(tt.slash);
+ break;
+ case charCodes.equalsTo:
+ finishToken(tt.eq);
+ break;
+ case charCodes.leftCurlyBrace:
+ finishToken(tt.braceL);
+ break;
+ case charCodes.dot:
+ finishToken(tt.dot);
+ break;
+ case charCodes.colon:
+ finishToken(tt.colon);
+ break;
+ default:
+ unexpected();
+ }
+ }
+}
+
+function nextJSXExprToken() {
+ state.tokens.push(new Token());
+ state.start = state.pos;
+ jsxReadToken();
+}
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/jsx/xhtml.js b/node_modules/sucrase/dist/esm/parser/plugins/jsx/xhtml.js
new file mode 100644
index 0000000..c6a0741
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/jsx/xhtml.js
@@ -0,0 +1,256 @@
+// Use a Map rather than object to avoid unexpected __proto__ access.
+export default new Map([
+ ["quot", "\u0022"],
+ ["amp", "&"],
+ ["apos", "\u0027"],
+ ["lt", "<"],
+ ["gt", ">"],
+ ["nbsp", "\u00A0"],
+ ["iexcl", "\u00A1"],
+ ["cent", "\u00A2"],
+ ["pound", "\u00A3"],
+ ["curren", "\u00A4"],
+ ["yen", "\u00A5"],
+ ["brvbar", "\u00A6"],
+ ["sect", "\u00A7"],
+ ["uml", "\u00A8"],
+ ["copy", "\u00A9"],
+ ["ordf", "\u00AA"],
+ ["laquo", "\u00AB"],
+ ["not", "\u00AC"],
+ ["shy", "\u00AD"],
+ ["reg", "\u00AE"],
+ ["macr", "\u00AF"],
+ ["deg", "\u00B0"],
+ ["plusmn", "\u00B1"],
+ ["sup2", "\u00B2"],
+ ["sup3", "\u00B3"],
+ ["acute", "\u00B4"],
+ ["micro", "\u00B5"],
+ ["para", "\u00B6"],
+ ["middot", "\u00B7"],
+ ["cedil", "\u00B8"],
+ ["sup1", "\u00B9"],
+ ["ordm", "\u00BA"],
+ ["raquo", "\u00BB"],
+ ["frac14", "\u00BC"],
+ ["frac12", "\u00BD"],
+ ["frac34", "\u00BE"],
+ ["iquest", "\u00BF"],
+ ["Agrave", "\u00C0"],
+ ["Aacute", "\u00C1"],
+ ["Acirc", "\u00C2"],
+ ["Atilde", "\u00C3"],
+ ["Auml", "\u00C4"],
+ ["Aring", "\u00C5"],
+ ["AElig", "\u00C6"],
+ ["Ccedil", "\u00C7"],
+ ["Egrave", "\u00C8"],
+ ["Eacute", "\u00C9"],
+ ["Ecirc", "\u00CA"],
+ ["Euml", "\u00CB"],
+ ["Igrave", "\u00CC"],
+ ["Iacute", "\u00CD"],
+ ["Icirc", "\u00CE"],
+ ["Iuml", "\u00CF"],
+ ["ETH", "\u00D0"],
+ ["Ntilde", "\u00D1"],
+ ["Ograve", "\u00D2"],
+ ["Oacute", "\u00D3"],
+ ["Ocirc", "\u00D4"],
+ ["Otilde", "\u00D5"],
+ ["Ouml", "\u00D6"],
+ ["times", "\u00D7"],
+ ["Oslash", "\u00D8"],
+ ["Ugrave", "\u00D9"],
+ ["Uacute", "\u00DA"],
+ ["Ucirc", "\u00DB"],
+ ["Uuml", "\u00DC"],
+ ["Yacute", "\u00DD"],
+ ["THORN", "\u00DE"],
+ ["szlig", "\u00DF"],
+ ["agrave", "\u00E0"],
+ ["aacute", "\u00E1"],
+ ["acirc", "\u00E2"],
+ ["atilde", "\u00E3"],
+ ["auml", "\u00E4"],
+ ["aring", "\u00E5"],
+ ["aelig", "\u00E6"],
+ ["ccedil", "\u00E7"],
+ ["egrave", "\u00E8"],
+ ["eacute", "\u00E9"],
+ ["ecirc", "\u00EA"],
+ ["euml", "\u00EB"],
+ ["igrave", "\u00EC"],
+ ["iacute", "\u00ED"],
+ ["icirc", "\u00EE"],
+ ["iuml", "\u00EF"],
+ ["eth", "\u00F0"],
+ ["ntilde", "\u00F1"],
+ ["ograve", "\u00F2"],
+ ["oacute", "\u00F3"],
+ ["ocirc", "\u00F4"],
+ ["otilde", "\u00F5"],
+ ["ouml", "\u00F6"],
+ ["divide", "\u00F7"],
+ ["oslash", "\u00F8"],
+ ["ugrave", "\u00F9"],
+ ["uacute", "\u00FA"],
+ ["ucirc", "\u00FB"],
+ ["uuml", "\u00FC"],
+ ["yacute", "\u00FD"],
+ ["thorn", "\u00FE"],
+ ["yuml", "\u00FF"],
+ ["OElig", "\u0152"],
+ ["oelig", "\u0153"],
+ ["Scaron", "\u0160"],
+ ["scaron", "\u0161"],
+ ["Yuml", "\u0178"],
+ ["fnof", "\u0192"],
+ ["circ", "\u02C6"],
+ ["tilde", "\u02DC"],
+ ["Alpha", "\u0391"],
+ ["Beta", "\u0392"],
+ ["Gamma", "\u0393"],
+ ["Delta", "\u0394"],
+ ["Epsilon", "\u0395"],
+ ["Zeta", "\u0396"],
+ ["Eta", "\u0397"],
+ ["Theta", "\u0398"],
+ ["Iota", "\u0399"],
+ ["Kappa", "\u039A"],
+ ["Lambda", "\u039B"],
+ ["Mu", "\u039C"],
+ ["Nu", "\u039D"],
+ ["Xi", "\u039E"],
+ ["Omicron", "\u039F"],
+ ["Pi", "\u03A0"],
+ ["Rho", "\u03A1"],
+ ["Sigma", "\u03A3"],
+ ["Tau", "\u03A4"],
+ ["Upsilon", "\u03A5"],
+ ["Phi", "\u03A6"],
+ ["Chi", "\u03A7"],
+ ["Psi", "\u03A8"],
+ ["Omega", "\u03A9"],
+ ["alpha", "\u03B1"],
+ ["beta", "\u03B2"],
+ ["gamma", "\u03B3"],
+ ["delta", "\u03B4"],
+ ["epsilon", "\u03B5"],
+ ["zeta", "\u03B6"],
+ ["eta", "\u03B7"],
+ ["theta", "\u03B8"],
+ ["iota", "\u03B9"],
+ ["kappa", "\u03BA"],
+ ["lambda", "\u03BB"],
+ ["mu", "\u03BC"],
+ ["nu", "\u03BD"],
+ ["xi", "\u03BE"],
+ ["omicron", "\u03BF"],
+ ["pi", "\u03C0"],
+ ["rho", "\u03C1"],
+ ["sigmaf", "\u03C2"],
+ ["sigma", "\u03C3"],
+ ["tau", "\u03C4"],
+ ["upsilon", "\u03C5"],
+ ["phi", "\u03C6"],
+ ["chi", "\u03C7"],
+ ["psi", "\u03C8"],
+ ["omega", "\u03C9"],
+ ["thetasym", "\u03D1"],
+ ["upsih", "\u03D2"],
+ ["piv", "\u03D6"],
+ ["ensp", "\u2002"],
+ ["emsp", "\u2003"],
+ ["thinsp", "\u2009"],
+ ["zwnj", "\u200C"],
+ ["zwj", "\u200D"],
+ ["lrm", "\u200E"],
+ ["rlm", "\u200F"],
+ ["ndash", "\u2013"],
+ ["mdash", "\u2014"],
+ ["lsquo", "\u2018"],
+ ["rsquo", "\u2019"],
+ ["sbquo", "\u201A"],
+ ["ldquo", "\u201C"],
+ ["rdquo", "\u201D"],
+ ["bdquo", "\u201E"],
+ ["dagger", "\u2020"],
+ ["Dagger", "\u2021"],
+ ["bull", "\u2022"],
+ ["hellip", "\u2026"],
+ ["permil", "\u2030"],
+ ["prime", "\u2032"],
+ ["Prime", "\u2033"],
+ ["lsaquo", "\u2039"],
+ ["rsaquo", "\u203A"],
+ ["oline", "\u203E"],
+ ["frasl", "\u2044"],
+ ["euro", "\u20AC"],
+ ["image", "\u2111"],
+ ["weierp", "\u2118"],
+ ["real", "\u211C"],
+ ["trade", "\u2122"],
+ ["alefsym", "\u2135"],
+ ["larr", "\u2190"],
+ ["uarr", "\u2191"],
+ ["rarr", "\u2192"],
+ ["darr", "\u2193"],
+ ["harr", "\u2194"],
+ ["crarr", "\u21B5"],
+ ["lArr", "\u21D0"],
+ ["uArr", "\u21D1"],
+ ["rArr", "\u21D2"],
+ ["dArr", "\u21D3"],
+ ["hArr", "\u21D4"],
+ ["forall", "\u2200"],
+ ["part", "\u2202"],
+ ["exist", "\u2203"],
+ ["empty", "\u2205"],
+ ["nabla", "\u2207"],
+ ["isin", "\u2208"],
+ ["notin", "\u2209"],
+ ["ni", "\u220B"],
+ ["prod", "\u220F"],
+ ["sum", "\u2211"],
+ ["minus", "\u2212"],
+ ["lowast", "\u2217"],
+ ["radic", "\u221A"],
+ ["prop", "\u221D"],
+ ["infin", "\u221E"],
+ ["ang", "\u2220"],
+ ["and", "\u2227"],
+ ["or", "\u2228"],
+ ["cap", "\u2229"],
+ ["cup", "\u222A"],
+ ["int", "\u222B"],
+ ["there4", "\u2234"],
+ ["sim", "\u223C"],
+ ["cong", "\u2245"],
+ ["asymp", "\u2248"],
+ ["ne", "\u2260"],
+ ["equiv", "\u2261"],
+ ["le", "\u2264"],
+ ["ge", "\u2265"],
+ ["sub", "\u2282"],
+ ["sup", "\u2283"],
+ ["nsub", "\u2284"],
+ ["sube", "\u2286"],
+ ["supe", "\u2287"],
+ ["oplus", "\u2295"],
+ ["otimes", "\u2297"],
+ ["perp", "\u22A5"],
+ ["sdot", "\u22C5"],
+ ["lceil", "\u2308"],
+ ["rceil", "\u2309"],
+ ["lfloor", "\u230A"],
+ ["rfloor", "\u230B"],
+ ["lang", "\u2329"],
+ ["rang", "\u232A"],
+ ["loz", "\u25CA"],
+ ["spades", "\u2660"],
+ ["clubs", "\u2663"],
+ ["hearts", "\u2665"],
+ ["diams", "\u2666"],
+]);
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/types.js b/node_modules/sucrase/dist/esm/parser/plugins/types.js
new file mode 100644
index 0000000..78e4af4
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/types.js
@@ -0,0 +1,37 @@
+import {eatTypeToken, lookaheadType, match} from "../tokenizer/index";
+import {TokenType as tt} from "../tokenizer/types";
+import {isFlowEnabled, isTypeScriptEnabled} from "../traverser/base";
+import {baseParseConditional} from "../traverser/expression";
+import {flowParseTypeAnnotation} from "./flow";
+import {tsParseTypeAnnotation} from "./typescript";
+
+/**
+ * Common parser code for TypeScript and Flow.
+ */
+
+// An apparent conditional expression could actually be an optional parameter in an arrow function.
+export function typedParseConditional(noIn) {
+ // If we see ?:, this can't possibly be a valid conditional. typedParseParenItem will be called
+ // later to finish off the arrow parameter. We also need to handle bare ? tokens for optional
+ // parameters without type annotations, i.e. ?, and ?) .
+ if (match(tt.question)) {
+ const nextType = lookaheadType();
+ if (nextType === tt.colon || nextType === tt.comma || nextType === tt.parenR) {
+ return;
+ }
+ }
+ baseParseConditional(noIn);
+}
+
+// Note: These "type casts" are *not* valid TS expressions.
+// But we parse them here and change them when completing the arrow function.
+export function typedParseParenItem() {
+ eatTypeToken(tt.question);
+ if (match(tt.colon)) {
+ if (isTypeScriptEnabled) {
+ tsParseTypeAnnotation();
+ } else if (isFlowEnabled) {
+ flowParseTypeAnnotation();
+ }
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/parser/plugins/typescript.js b/node_modules/sucrase/dist/esm/parser/plugins/typescript.js
new file mode 100644
index 0000000..f64ca67
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/plugins/typescript.js
@@ -0,0 +1,1632 @@
+import {
+ eat,
+ finishToken,
+ IdentifierRole,
+ lookaheadType,
+ lookaheadTypeAndKeyword,
+ match,
+ next,
+ nextTemplateToken,
+ popTypeContext,
+ pushTypeContext,
+ rescan_gt,
+} from "../tokenizer/index";
+import {ContextualKeyword} from "../tokenizer/keywords";
+import {TokenType, TokenType as tt} from "../tokenizer/types";
+import {isJSXEnabled, state} from "../traverser/base";
+import {
+ atPossibleAsync,
+ baseParseMaybeAssign,
+ baseParseSubscript,
+ parseCallExpressionArguments,
+ parseExprAtom,
+ parseExpression,
+ parseFunctionBody,
+ parseIdentifier,
+ parseLiteral,
+ parseMaybeAssign,
+ parseMaybeUnary,
+ parsePropertyName,
+ parseTemplate,
+
+} from "../traverser/expression";
+import {parseBindingIdentifier, parseBindingList, parseImportedIdentifier} from "../traverser/lval";
+import {
+ baseParseMaybeDecoratorArguments,
+ parseBlockBody,
+ parseClass,
+ parseFunction,
+ parseFunctionParams,
+ parseStatement,
+ parseVarStatement,
+} from "../traverser/statement";
+import {
+ canInsertSemicolon,
+ eatContextual,
+ expect,
+ expectContextual,
+ hasPrecedingLineBreak,
+ isContextual,
+ isLineTerminator,
+ isLookaheadContextual,
+ semicolon,
+ unexpected,
+} from "../traverser/util";
+import {nextJSXTagToken} from "./jsx";
+
+function tsIsIdentifier() {
+ // TODO: actually a bit more complex in TypeScript, but shouldn't matter.
+ // See https://github.com/Microsoft/TypeScript/issues/15008
+ return match(tt.name);
+}
+
+function isLiteralPropertyName() {
+ return (
+ match(tt.name) ||
+ Boolean(state.type & TokenType.IS_KEYWORD) ||
+ match(tt.string) ||
+ match(tt.num) ||
+ match(tt.bigint) ||
+ match(tt.decimal)
+ );
+}
+
+function tsNextTokenCanFollowModifier() {
+ // Note: TypeScript's implementation is much more complicated because
+ // more things are considered modifiers there.
+ // This implementation only handles modifiers not handled by babylon itself. And "static".
+ // TODO: Would be nice to avoid lookahead. Want a hasLineBreakUpNext() method...
+ const snapshot = state.snapshot();
+
+ next();
+ const canFollowModifier =
+ (match(tt.bracketL) ||
+ match(tt.braceL) ||
+ match(tt.star) ||
+ match(tt.ellipsis) ||
+ match(tt.hash) ||
+ isLiteralPropertyName()) &&
+ !hasPrecedingLineBreak();
+
+ if (canFollowModifier) {
+ return true;
+ } else {
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ }
+}
+
+export function tsParseModifiers(allowedModifiers) {
+ while (true) {
+ const modifier = tsParseModifier(allowedModifiers);
+ if (modifier === null) {
+ break;
+ }
+ }
+}
+
+/** Parses a modifier matching one the given modifier names. */
+export function tsParseModifier(
+ allowedModifiers,
+) {
+ if (!match(tt.name)) {
+ return null;
+ }
+
+ const modifier = state.contextualKeyword;
+ if (allowedModifiers.indexOf(modifier) !== -1 && tsNextTokenCanFollowModifier()) {
+ switch (modifier) {
+ case ContextualKeyword._readonly:
+ state.tokens[state.tokens.length - 1].type = tt._readonly;
+ break;
+ case ContextualKeyword._abstract:
+ state.tokens[state.tokens.length - 1].type = tt._abstract;
+ break;
+ case ContextualKeyword._static:
+ state.tokens[state.tokens.length - 1].type = tt._static;
+ break;
+ case ContextualKeyword._public:
+ state.tokens[state.tokens.length - 1].type = tt._public;
+ break;
+ case ContextualKeyword._private:
+ state.tokens[state.tokens.length - 1].type = tt._private;
+ break;
+ case ContextualKeyword._protected:
+ state.tokens[state.tokens.length - 1].type = tt._protected;
+ break;
+ case ContextualKeyword._override:
+ state.tokens[state.tokens.length - 1].type = tt._override;
+ break;
+ case ContextualKeyword._declare:
+ state.tokens[state.tokens.length - 1].type = tt._declare;
+ break;
+ default:
+ break;
+ }
+ return modifier;
+ }
+ return null;
+}
+
+function tsParseEntityName() {
+ parseIdentifier();
+ while (eat(tt.dot)) {
+ parseIdentifier();
+ }
+}
+
+function tsParseTypeReference() {
+ tsParseEntityName();
+ if (!hasPrecedingLineBreak() && match(tt.lessThan)) {
+ tsParseTypeArguments();
+ }
+}
+
+function tsParseThisTypePredicate() {
+ next();
+ tsParseTypeAnnotation();
+}
+
+function tsParseThisTypeNode() {
+ next();
+}
+
+function tsParseTypeQuery() {
+ expect(tt._typeof);
+ if (match(tt._import)) {
+ tsParseImportType();
+ } else {
+ tsParseEntityName();
+ }
+ if (!hasPrecedingLineBreak() && match(tt.lessThan)) {
+ tsParseTypeArguments();
+ }
+}
+
+function tsParseImportType() {
+ expect(tt._import);
+ expect(tt.parenL);
+ expect(tt.string);
+ expect(tt.parenR);
+ if (eat(tt.dot)) {
+ tsParseEntityName();
+ }
+ if (match(tt.lessThan)) {
+ tsParseTypeArguments();
+ }
+}
+
+function tsParseTypeParameter() {
+ eat(tt._const);
+ const hadIn = eat(tt._in);
+ const hadOut = eatContextual(ContextualKeyword._out);
+ eat(tt._const);
+ if ((hadIn || hadOut) && !match(tt.name)) {
+ // The "in" or "out" keyword must have actually been the type parameter
+ // name, so set it as the name.
+ state.tokens[state.tokens.length - 1].type = tt.name;
+ } else {
+ parseIdentifier();
+ }
+
+ if (eat(tt._extends)) {
+ tsParseType();
+ }
+ if (eat(tt.eq)) {
+ tsParseType();
+ }
+}
+
+export function tsTryParseTypeParameters() {
+ if (match(tt.lessThan)) {
+ tsParseTypeParameters();
+ }
+}
+
+function tsParseTypeParameters() {
+ const oldIsType = pushTypeContext(0);
+ if (match(tt.lessThan) || match(tt.typeParameterStart)) {
+ next();
+ } else {
+ unexpected();
+ }
+
+ while (!eat(tt.greaterThan) && !state.error) {
+ tsParseTypeParameter();
+ eat(tt.comma);
+ }
+ popTypeContext(oldIsType);
+}
+
+// Note: In TypeScript implementation we must provide `yieldContext` and `awaitContext`,
+// but here it's always false, because this is only used for types.
+function tsFillSignature(returnToken) {
+ // Arrow fns *must* have return token (`=>`). Normal functions can omit it.
+ const returnTokenRequired = returnToken === tt.arrow;
+ tsTryParseTypeParameters();
+ expect(tt.parenL);
+ // Create a scope even though we're doing type parsing so we don't accidentally
+ // treat params as top-level bindings.
+ state.scopeDepth++;
+ tsParseBindingListForSignature(false /* isBlockScope */);
+ state.scopeDepth--;
+ if (returnTokenRequired) {
+ tsParseTypeOrTypePredicateAnnotation(returnToken);
+ } else if (match(returnToken)) {
+ tsParseTypeOrTypePredicateAnnotation(returnToken);
+ }
+}
+
+function tsParseBindingListForSignature(isBlockScope) {
+ parseBindingList(tt.parenR, isBlockScope);
+}
+
+function tsParseTypeMemberSemicolon() {
+ if (!eat(tt.comma)) {
+ semicolon();
+ }
+}
+
+function tsParseSignatureMember() {
+ tsFillSignature(tt.colon);
+ tsParseTypeMemberSemicolon();
+}
+
+function tsIsUnambiguouslyIndexSignature() {
+ const snapshot = state.snapshot();
+ next(); // Skip '{'
+ const isIndexSignature = eat(tt.name) && match(tt.colon);
+ state.restoreFromSnapshot(snapshot);
+ return isIndexSignature;
+}
+
+function tsTryParseIndexSignature() {
+ if (!(match(tt.bracketL) && tsIsUnambiguouslyIndexSignature())) {
+ return false;
+ }
+
+ const oldIsType = pushTypeContext(0);
+
+ expect(tt.bracketL);
+ parseIdentifier();
+ tsParseTypeAnnotation();
+ expect(tt.bracketR);
+
+ tsTryParseTypeAnnotation();
+ tsParseTypeMemberSemicolon();
+
+ popTypeContext(oldIsType);
+ return true;
+}
+
+function tsParsePropertyOrMethodSignature(isReadonly) {
+ eat(tt.question);
+
+ if (!isReadonly && (match(tt.parenL) || match(tt.lessThan))) {
+ tsFillSignature(tt.colon);
+ tsParseTypeMemberSemicolon();
+ } else {
+ tsTryParseTypeAnnotation();
+ tsParseTypeMemberSemicolon();
+ }
+}
+
+function tsParseTypeMember() {
+ if (match(tt.parenL) || match(tt.lessThan)) {
+ // call signature
+ tsParseSignatureMember();
+ return;
+ }
+ if (match(tt._new)) {
+ next();
+ if (match(tt.parenL) || match(tt.lessThan)) {
+ // constructor signature
+ tsParseSignatureMember();
+ } else {
+ tsParsePropertyOrMethodSignature(false);
+ }
+ return;
+ }
+ const readonly = !!tsParseModifier([ContextualKeyword._readonly]);
+
+ const found = tsTryParseIndexSignature();
+ if (found) {
+ return;
+ }
+ if (
+ (isContextual(ContextualKeyword._get) || isContextual(ContextualKeyword._set)) &&
+ tsNextTokenCanFollowModifier()
+ ) {
+ // This is a getter/setter on a type. The tsNextTokenCanFollowModifier
+ // function already called next() for us, so continue parsing the name.
+ }
+ parsePropertyName(-1 /* Types don't need context IDs. */);
+ tsParsePropertyOrMethodSignature(readonly);
+}
+
+function tsParseTypeLiteral() {
+ tsParseObjectTypeMembers();
+}
+
+function tsParseObjectTypeMembers() {
+ expect(tt.braceL);
+ while (!eat(tt.braceR) && !state.error) {
+ tsParseTypeMember();
+ }
+}
+
+function tsLookaheadIsStartOfMappedType() {
+ const snapshot = state.snapshot();
+ const isStartOfMappedType = tsIsStartOfMappedType();
+ state.restoreFromSnapshot(snapshot);
+ return isStartOfMappedType;
+}
+
+function tsIsStartOfMappedType() {
+ next();
+ if (eat(tt.plus) || eat(tt.minus)) {
+ return isContextual(ContextualKeyword._readonly);
+ }
+ if (isContextual(ContextualKeyword._readonly)) {
+ next();
+ }
+ if (!match(tt.bracketL)) {
+ return false;
+ }
+ next();
+ if (!tsIsIdentifier()) {
+ return false;
+ }
+ next();
+ return match(tt._in);
+}
+
+function tsParseMappedTypeParameter() {
+ parseIdentifier();
+ expect(tt._in);
+ tsParseType();
+}
+
+function tsParseMappedType() {
+ expect(tt.braceL);
+ if (match(tt.plus) || match(tt.minus)) {
+ next();
+ expectContextual(ContextualKeyword._readonly);
+ } else {
+ eatContextual(ContextualKeyword._readonly);
+ }
+ expect(tt.bracketL);
+ tsParseMappedTypeParameter();
+ if (eatContextual(ContextualKeyword._as)) {
+ tsParseType();
+ }
+ expect(tt.bracketR);
+ if (match(tt.plus) || match(tt.minus)) {
+ next();
+ expect(tt.question);
+ } else {
+ eat(tt.question);
+ }
+ tsTryParseType();
+ semicolon();
+ expect(tt.braceR);
+}
+
+function tsParseTupleType() {
+ expect(tt.bracketL);
+ while (!eat(tt.bracketR) && !state.error) {
+ // Do not validate presence of either none or only labeled elements
+ tsParseTupleElementType();
+ eat(tt.comma);
+ }
+}
+
+function tsParseTupleElementType() {
+ // parses `...TsType[]`
+ if (eat(tt.ellipsis)) {
+ tsParseType();
+ } else {
+ // parses `TsType?`
+ tsParseType();
+ eat(tt.question);
+ }
+
+ // The type we parsed above was actually a label
+ if (eat(tt.colon)) {
+ // Labeled tuple types must affix the label with `...` or `?`, so no need to handle those here
+ tsParseType();
+ }
+}
+
+function tsParseParenthesizedType() {
+ expect(tt.parenL);
+ tsParseType();
+ expect(tt.parenR);
+}
+
+function tsParseTemplateLiteralType() {
+ // Finish `, read quasi
+ nextTemplateToken();
+ // Finish quasi, read ${
+ nextTemplateToken();
+ while (!match(tt.backQuote) && !state.error) {
+ expect(tt.dollarBraceL);
+ tsParseType();
+ // Finish }, read quasi
+ nextTemplateToken();
+ // Finish quasi, read either ${ or `
+ nextTemplateToken();
+ }
+ next();
+}
+
+var FunctionType; (function (FunctionType) {
+ const TSFunctionType = 0; FunctionType[FunctionType["TSFunctionType"] = TSFunctionType] = "TSFunctionType";
+ const TSConstructorType = TSFunctionType + 1; FunctionType[FunctionType["TSConstructorType"] = TSConstructorType] = "TSConstructorType";
+ const TSAbstractConstructorType = TSConstructorType + 1; FunctionType[FunctionType["TSAbstractConstructorType"] = TSAbstractConstructorType] = "TSAbstractConstructorType";
+})(FunctionType || (FunctionType = {}));
+
+function tsParseFunctionOrConstructorType(type) {
+ if (type === FunctionType.TSAbstractConstructorType) {
+ expectContextual(ContextualKeyword._abstract);
+ }
+ if (type === FunctionType.TSConstructorType || type === FunctionType.TSAbstractConstructorType) {
+ expect(tt._new);
+ }
+ const oldInDisallowConditionalTypesContext = state.inDisallowConditionalTypesContext;
+ state.inDisallowConditionalTypesContext = false;
+ tsFillSignature(tt.arrow);
+ state.inDisallowConditionalTypesContext = oldInDisallowConditionalTypesContext;
+}
+
+function tsParseNonArrayType() {
+ switch (state.type) {
+ case tt.name:
+ tsParseTypeReference();
+ return;
+ case tt._void:
+ case tt._null:
+ next();
+ return;
+ case tt.string:
+ case tt.num:
+ case tt.bigint:
+ case tt.decimal:
+ case tt._true:
+ case tt._false:
+ parseLiteral();
+ return;
+ case tt.minus:
+ next();
+ parseLiteral();
+ return;
+ case tt._this: {
+ tsParseThisTypeNode();
+ if (isContextual(ContextualKeyword._is) && !hasPrecedingLineBreak()) {
+ tsParseThisTypePredicate();
+ }
+ return;
+ }
+ case tt._typeof:
+ tsParseTypeQuery();
+ return;
+ case tt._import:
+ tsParseImportType();
+ return;
+ case tt.braceL:
+ if (tsLookaheadIsStartOfMappedType()) {
+ tsParseMappedType();
+ } else {
+ tsParseTypeLiteral();
+ }
+ return;
+ case tt.bracketL:
+ tsParseTupleType();
+ return;
+ case tt.parenL:
+ tsParseParenthesizedType();
+ return;
+ case tt.backQuote:
+ tsParseTemplateLiteralType();
+ return;
+ default:
+ if (state.type & TokenType.IS_KEYWORD) {
+ next();
+ state.tokens[state.tokens.length - 1].type = tt.name;
+ return;
+ }
+ break;
+ }
+
+ unexpected();
+}
+
+function tsParseArrayTypeOrHigher() {
+ tsParseNonArrayType();
+ while (!hasPrecedingLineBreak() && eat(tt.bracketL)) {
+ if (!eat(tt.bracketR)) {
+ // If we hit ] immediately, this is an array type, otherwise it's an indexed access type.
+ tsParseType();
+ expect(tt.bracketR);
+ }
+ }
+}
+
+function tsParseInferType() {
+ expectContextual(ContextualKeyword._infer);
+ parseIdentifier();
+ if (match(tt._extends)) {
+ // Infer type constraints introduce an ambiguity about whether the "extends"
+ // is a constraint for this infer type or is another conditional type.
+ const snapshot = state.snapshot();
+ expect(tt._extends);
+ const oldInDisallowConditionalTypesContext = state.inDisallowConditionalTypesContext;
+ state.inDisallowConditionalTypesContext = true;
+ tsParseType();
+ state.inDisallowConditionalTypesContext = oldInDisallowConditionalTypesContext;
+ if (state.error || (!state.inDisallowConditionalTypesContext && match(tt.question))) {
+ state.restoreFromSnapshot(snapshot);
+ }
+ }
+}
+
+function tsParseTypeOperatorOrHigher() {
+ if (
+ isContextual(ContextualKeyword._keyof) ||
+ isContextual(ContextualKeyword._unique) ||
+ isContextual(ContextualKeyword._readonly)
+ ) {
+ next();
+ tsParseTypeOperatorOrHigher();
+ } else if (isContextual(ContextualKeyword._infer)) {
+ tsParseInferType();
+ } else {
+ const oldInDisallowConditionalTypesContext = state.inDisallowConditionalTypesContext;
+ state.inDisallowConditionalTypesContext = false;
+ tsParseArrayTypeOrHigher();
+ state.inDisallowConditionalTypesContext = oldInDisallowConditionalTypesContext;
+ }
+}
+
+function tsParseIntersectionTypeOrHigher() {
+ eat(tt.bitwiseAND);
+ tsParseTypeOperatorOrHigher();
+ if (match(tt.bitwiseAND)) {
+ while (eat(tt.bitwiseAND)) {
+ tsParseTypeOperatorOrHigher();
+ }
+ }
+}
+
+function tsParseUnionTypeOrHigher() {
+ eat(tt.bitwiseOR);
+ tsParseIntersectionTypeOrHigher();
+ if (match(tt.bitwiseOR)) {
+ while (eat(tt.bitwiseOR)) {
+ tsParseIntersectionTypeOrHigher();
+ }
+ }
+}
+
+function tsIsStartOfFunctionType() {
+ if (match(tt.lessThan)) {
+ return true;
+ }
+ return match(tt.parenL) && tsLookaheadIsUnambiguouslyStartOfFunctionType();
+}
+
+function tsSkipParameterStart() {
+ if (match(tt.name) || match(tt._this)) {
+ next();
+ return true;
+ }
+ // If this is a possible array/object destructure, walk to the matching bracket/brace.
+ // The next token after will tell us definitively whether this is a function param.
+ if (match(tt.braceL) || match(tt.bracketL)) {
+ let depth = 1;
+ next();
+ while (depth > 0 && !state.error) {
+ if (match(tt.braceL) || match(tt.bracketL)) {
+ depth++;
+ } else if (match(tt.braceR) || match(tt.bracketR)) {
+ depth--;
+ }
+ next();
+ }
+ return true;
+ }
+ return false;
+}
+
+function tsLookaheadIsUnambiguouslyStartOfFunctionType() {
+ const snapshot = state.snapshot();
+ const isUnambiguouslyStartOfFunctionType = tsIsUnambiguouslyStartOfFunctionType();
+ state.restoreFromSnapshot(snapshot);
+ return isUnambiguouslyStartOfFunctionType;
+}
+
+function tsIsUnambiguouslyStartOfFunctionType() {
+ next();
+ if (match(tt.parenR) || match(tt.ellipsis)) {
+ // ( )
+ // ( ...
+ return true;
+ }
+ if (tsSkipParameterStart()) {
+ if (match(tt.colon) || match(tt.comma) || match(tt.question) || match(tt.eq)) {
+ // ( xxx :
+ // ( xxx ,
+ // ( xxx ?
+ // ( xxx =
+ return true;
+ }
+ if (match(tt.parenR)) {
+ next();
+ if (match(tt.arrow)) {
+ // ( xxx ) =>
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+function tsParseTypeOrTypePredicateAnnotation(returnToken) {
+ const oldIsType = pushTypeContext(0);
+ expect(returnToken);
+ const finishedReturn = tsParseTypePredicateOrAssertsPrefix();
+ if (!finishedReturn) {
+ tsParseType();
+ }
+ popTypeContext(oldIsType);
+}
+
+function tsTryParseTypeOrTypePredicateAnnotation() {
+ if (match(tt.colon)) {
+ tsParseTypeOrTypePredicateAnnotation(tt.colon);
+ }
+}
+
+export function tsTryParseTypeAnnotation() {
+ if (match(tt.colon)) {
+ tsParseTypeAnnotation();
+ }
+}
+
+function tsTryParseType() {
+ if (eat(tt.colon)) {
+ tsParseType();
+ }
+}
+
+/**
+ * Detect a few special return syntax cases: `x is T`, `asserts x`, `asserts x is T`,
+ * `asserts this is T`.
+ *
+ * Returns true if we parsed the return type, false if there's still a type to be parsed.
+ */
+function tsParseTypePredicateOrAssertsPrefix() {
+ const snapshot = state.snapshot();
+ if (isContextual(ContextualKeyword._asserts)) {
+ // Normally this is `asserts x is T`, but at this point, it might be `asserts is T` (a user-
+ // defined type guard on the `asserts` variable) or just a type called `asserts`.
+ next();
+ if (eatContextual(ContextualKeyword._is)) {
+ // If we see `asserts is`, then this must be of the form `asserts is T`, since
+ // `asserts is is T` isn't valid.
+ tsParseType();
+ return true;
+ } else if (tsIsIdentifier() || match(tt._this)) {
+ next();
+ if (eatContextual(ContextualKeyword._is)) {
+ // If we see `is`, then this is `asserts x is T`. Otherwise, it's `asserts x`.
+ tsParseType();
+ }
+ return true;
+ } else {
+ // Regular type, so bail out and start type parsing from scratch.
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ }
+ } else if (tsIsIdentifier() || match(tt._this)) {
+ // This is a regular identifier, which may or may not have "is" after it.
+ next();
+ if (isContextual(ContextualKeyword._is) && !hasPrecedingLineBreak()) {
+ next();
+ tsParseType();
+ return true;
+ } else {
+ // Regular type, so bail out and start type parsing from scratch.
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ }
+ }
+ return false;
+}
+
+export function tsParseTypeAnnotation() {
+ const oldIsType = pushTypeContext(0);
+ expect(tt.colon);
+ tsParseType();
+ popTypeContext(oldIsType);
+}
+
+export function tsParseType() {
+ tsParseNonConditionalType();
+ if (state.inDisallowConditionalTypesContext || hasPrecedingLineBreak() || !eat(tt._extends)) {
+ return;
+ }
+ // extends type
+ const oldInDisallowConditionalTypesContext = state.inDisallowConditionalTypesContext;
+ state.inDisallowConditionalTypesContext = true;
+ tsParseNonConditionalType();
+ state.inDisallowConditionalTypesContext = oldInDisallowConditionalTypesContext;
+
+ expect(tt.question);
+ // true type
+ tsParseType();
+ expect(tt.colon);
+ // false type
+ tsParseType();
+}
+
+function isAbstractConstructorSignature() {
+ return isContextual(ContextualKeyword._abstract) && lookaheadType() === tt._new;
+}
+
+export function tsParseNonConditionalType() {
+ if (tsIsStartOfFunctionType()) {
+ tsParseFunctionOrConstructorType(FunctionType.TSFunctionType);
+ return;
+ }
+ if (match(tt._new)) {
+ // As in `new () => Date`
+ tsParseFunctionOrConstructorType(FunctionType.TSConstructorType);
+ return;
+ } else if (isAbstractConstructorSignature()) {
+ // As in `abstract new () => Date`
+ tsParseFunctionOrConstructorType(FunctionType.TSAbstractConstructorType);
+ return;
+ }
+ tsParseUnionTypeOrHigher();
+}
+
+export function tsParseTypeAssertion() {
+ const oldIsType = pushTypeContext(1);
+ tsParseType();
+ expect(tt.greaterThan);
+ popTypeContext(oldIsType);
+ parseMaybeUnary();
+}
+
+// In JSX mode, try to parse explicit type arguments on a JSX tag, e.g.
+// `<Component<T> ... />`. The jsxTagStart token is retyped so the `<` is
+// treated as the start of a type parameter list.
+export function tsTryParseJSXTypeArgument() {
+ if (eat(tt.jsxTagStart)) {
+ state.tokens[state.tokens.length - 1].type = tt.typeParameterStart;
+ const oldIsType = pushTypeContext(1);
+ while (!match(tt.greaterThan) && !state.error) {
+ tsParseType();
+ eat(tt.comma);
+ }
+ // Process >, but the one after needs to be parsed JSX-style.
+ nextJSXTagToken();
+ popTypeContext(oldIsType);
+ }
+}
+
+// Parse a comma-separated `extends`/`implements` clause, up to (but not
+// including) the `{` that opens the class/interface body.
+function tsParseHeritageClause() {
+ while (!match(tt.braceL) && !state.error) {
+ tsParseExpressionWithTypeArguments();
+ eat(tt.comma);
+ }
+}
+
+// Parse one heritage-clause entry: a dotted entity name with optional type
+// arguments, e.g. `Foo.Bar<T>`.
+function tsParseExpressionWithTypeArguments() {
+ // Note: TS uses parseLeftHandSideExpressionOrHigher,
+ // then has grammar errors later if it's not an EntityName.
+ tsParseEntityName();
+ if (match(tt.lessThan)) {
+ tsParseTypeArguments();
+ }
+}
+
+// Parse an interface declaration body: name, optional type parameters,
+// optional `extends` clause, then the member block.
+function tsParseInterfaceDeclaration() {
+ parseBindingIdentifier(false);
+ tsTryParseTypeParameters();
+ if (eat(tt._extends)) {
+ tsParseHeritageClause();
+ }
+ tsParseObjectTypeMembers();
+}
+
+// Parse `type Name<T> = SomeType;` (the `type` keyword was already consumed).
+function tsParseTypeAliasDeclaration() {
+ parseBindingIdentifier(false);
+ tsTryParseTypeParameters();
+ expect(tt.eq);
+ tsParseType();
+ semicolon();
+}
+
+// Parse one enum member, optionally with an `= value` initializer.
+function tsParseEnumMember() {
+ // Computed property names are grammar errors in an enum, so accept just string literal or identifier.
+ if (match(tt.string)) {
+ parseLiteral();
+ } else {
+ parseIdentifier();
+ }
+ if (eat(tt.eq)) {
+ const eqIndex = state.tokens.length - 1;
+ parseMaybeAssign();
+ // Record where the initializer ends so the enum transform can slice it out.
+ state.tokens[eqIndex].rhsEndIndex = state.tokens.length;
+ }
+}
+
+// Parse an enum declaration body: name followed by a braced, comma-separated
+// member list.
+function tsParseEnumDeclaration() {
+ parseBindingIdentifier(false);
+ expect(tt.braceL);
+ while (!eat(tt.braceR) && !state.error) {
+ tsParseEnumMember();
+ eat(tt.comma);
+ }
+}
+
+// Parse a braced module/namespace body as an ordinary statement block.
+function tsParseModuleBlock() {
+ expect(tt.braceL);
+ parseBlockBody(/* end */ tt.braceR);
+}
+
+// Parse `namespace A.B.C { ... }`: recurses once per dotted segment, then
+// parses the block at the innermost name.
+function tsParseModuleOrNamespaceDeclaration() {
+ parseBindingIdentifier(false);
+ if (eat(tt.dot)) {
+ tsParseModuleOrNamespaceDeclaration();
+ } else {
+ tsParseModuleBlock();
+ }
+}
+
+// Parse an ambient module declaration: either `global { ... }` or
+// `module "name" { ... }` / `module "name";`.
+function tsParseAmbientExternalModuleDeclaration() {
+ if (isContextual(ContextualKeyword._global)) {
+ parseIdentifier();
+ } else if (match(tt.string)) {
+ parseExprAtom();
+ } else {
+ unexpected();
+ }
+
+ if (match(tt.braceL)) {
+ tsParseModuleBlock();
+ } else {
+ semicolon();
+ }
+}
+
+// Parse `import A = <module reference>;` (TS import-equals form).
+export function tsParseImportEqualsDeclaration() {
+ parseImportedIdentifier();
+ expect(tt.eq);
+ tsParseModuleReference();
+ semicolon();
+}
+
+// True when positioned at `require(`, i.e. an external module reference.
+function tsIsExternalModuleReference() {
+ return isContextual(ContextualKeyword._require) && lookaheadType() === tt.parenL;
+}
+
+// Parse the right-hand side of an import-equals declaration: either
+// `require("...")` or a dotted entity name.
+function tsParseModuleReference() {
+ if (tsIsExternalModuleReference()) {
+ tsParseExternalModuleReference();
+ } else {
+ tsParseEntityName();
+ }
+}
+
+// Parse `require("moduleName")`; only a string literal argument is valid.
+function tsParseExternalModuleReference() {
+ expectContextual(ContextualKeyword._require);
+ expect(tt.parenL);
+ if (!match(tt.string)) {
+ unexpected();
+ }
+ parseLiteral();
+ expect(tt.parenR);
+}
+
+// Utilities
+
+// Returns true if a statement matched.
+// Try to parse the statement following a `declare` keyword. Everything parsed
+// here is ambient, so it is wrapped in a type context and later erased.
+// Returns true if a statement matched.
+function tsTryParseDeclare() {
+ if (isLineTerminator()) {
+ return false;
+ }
+ switch (state.type) {
+ case tt._function: {
+ const oldIsType = pushTypeContext(1);
+ next();
+ // We don't need to precisely get the function start here, since it's only used to mark
+ // the function as a type if it's bodiless, and it's already a type here.
+ const functionStart = state.start;
+ parseFunction(functionStart, /* isStatement */ true);
+ popTypeContext(oldIsType);
+ return true;
+ }
+ case tt._class: {
+ const oldIsType = pushTypeContext(1);
+ parseClass(/* isStatement */ true, /* optionalId */ false);
+ popTypeContext(oldIsType);
+ return true;
+ }
+ case tt._const: {
+ if (match(tt._const) && isLookaheadContextual(ContextualKeyword._enum)) {
+ const oldIsType = pushTypeContext(1);
+ // `const enum = 0;` not allowed because "enum" is a strict mode reserved word.
+ expect(tt._const);
+ expectContextual(ContextualKeyword._enum);
+ state.tokens[state.tokens.length - 1].type = tt._enum;
+ tsParseEnumDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ }
+ // falls through
+ case tt._var:
+ case tt._let: {
+ const oldIsType = pushTypeContext(1);
+ parseVarStatement(state.type !== tt._var);
+ popTypeContext(oldIsType);
+ return true;
+ }
+ case tt.name: {
+ const oldIsType = pushTypeContext(1);
+ const contextualKeyword = state.contextualKeyword;
+ let matched = false;
+ if (contextualKeyword === ContextualKeyword._global) {
+ // `declare global { ... }`
+ tsParseAmbientExternalModuleDeclaration();
+ matched = true;
+ } else {
+ matched = tsParseDeclaration(contextualKeyword, /* isBeforeToken */ true);
+ }
+ popTypeContext(oldIsType);
+ return matched;
+ }
+ default:
+ return false;
+ }
+}
+
+// Note: this won't be called unless the keyword is allowed in `shouldParseExportDeclaration`.
+// Returns true if it matched a declaration.
+// Try to parse a TS-only declaration right after `export`.
+// Returns true if it matched a declaration.
+function tsTryParseExportDeclaration() {
+ return tsParseDeclaration(state.contextualKeyword, /* isBeforeToken */ true);
+}
+
+// Returns true if it matched a statement.
+// Called after an identifier was parsed as an expression statement; decide
+// whether it was actually the start of a TS declaration (`declare ...`,
+// `global { ... }`, `type X = ...`, etc.) and parse the remainder if so.
+// Returns true if it matched a statement.
+function tsParseExpressionStatement(contextualKeyword) {
+ switch (contextualKeyword) {
+ case ContextualKeyword._declare: {
+ const declareTokenIndex = state.tokens.length - 1;
+ const matched = tsTryParseDeclare();
+ if (matched) {
+ // Retype the already-emitted identifier as the `declare` keyword.
+ state.tokens[declareTokenIndex].type = tt._declare;
+ return true;
+ }
+ break;
+ }
+ case ContextualKeyword._global:
+ // `global { }` (with no `declare`) may appear inside an ambient module declaration.
+ // Would like to use tsParseAmbientExternalModuleDeclaration here, but already ran past "global".
+ if (match(tt.braceL)) {
+ tsParseModuleBlock();
+ return true;
+ }
+ break;
+
+ default:
+ return tsParseDeclaration(contextualKeyword, /* isBeforeToken */ false);
+ }
+ return false;
+}
+
+/**
+ * Common code for parsing a declaration.
+ *
+ * isBeforeToken indicates that the current parser state is at the contextual
+ * keyword (and that it is not yet emitted) rather than reading the token after
+ * it. When isBeforeToken is true, we may be preceded by an `export` token and
+ * should include that token in a type context we create, e.g. to handle
+ * `export interface` or `export type`. (This is a bit of a hack and should be
+ * cleaned up at some point.)
+ *
+ * Returns true if it matched a declaration.
+ */
+function tsParseDeclaration(contextualKeyword, isBeforeToken) {
+ switch (contextualKeyword) {
+ case ContextualKeyword._abstract:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt._class)) {
+ // Retype the `abstract` identifier token; the class itself is runtime code.
+ state.tokens[state.tokens.length - 1].type = tt._abstract;
+ parseClass(/* isStatement */ true, /* optionalId */ false);
+ return true;
+ }
+ break;
+
+ case ContextualKeyword._enum:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt.name)) {
+ state.tokens[state.tokens.length - 1].type = tt._enum;
+ tsParseEnumDeclaration();
+ return true;
+ }
+ break;
+
+ case ContextualKeyword._interface:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt.name)) {
+ // `next` is true in "export" and "declare" contexts, so we want to remove that token
+ // as well.
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseInterfaceDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ break;
+
+ case ContextualKeyword._module:
+ if (tsCheckLineTerminator(isBeforeToken)) {
+ if (match(tt.string)) {
+ // `module "name" { ... }` — an ambient external module.
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseAmbientExternalModuleDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ } else if (match(tt.name)) {
+ // `module A.B { ... }` — same as a namespace declaration.
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseModuleOrNamespaceDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ }
+ break;
+
+ case ContextualKeyword._namespace:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt.name)) {
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseModuleOrNamespaceDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ break;
+
+ case ContextualKeyword._type:
+ if (tsCheckLineTerminator(isBeforeToken) && match(tt.name)) {
+ const oldIsType = pushTypeContext(isBeforeToken ? 2 : 1);
+ tsParseTypeAliasDeclaration();
+ popTypeContext(oldIsType);
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return false;
+}
+
+// When isBeforeToken, consume the keyword token and accept; otherwise, accept
+// only if the keyword was not followed by a line break (ASI guard).
+function tsCheckLineTerminator(isBeforeToken) {
+ if (isBeforeToken) {
+ // Babel checks hasFollowingLineBreak here and returns false, but this
+ // doesn't actually come up, e.g. `export interface` can never be on its own
+ // line in valid code.
+ next();
+ return true;
+ } else {
+ return !isLineTerminator();
+ }
+}
+
+// Returns true if there was a generic async arrow function.
+// Speculatively parse `async <T>(params): R => body`; on any parse error the
+// snapshot restores the tokenizer so the caller can try other interpretations.
+// Returns true if there was a generic async arrow function.
+function tsTryParseGenericAsyncArrowFunction() {
+ const snapshot = state.snapshot();
+
+ tsParseTypeParameters();
+ parseFunctionParams();
+ tsTryParseTypeOrTypePredicateAnnotation();
+ expect(tt.arrow);
+
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ }
+
+ parseFunctionBody(true);
+ return true;
+}
+
+/**
+ * If necessary, hack the tokenizer state so that this bitshift was actually a
+ * less-than token, then keep parsing. This should only be used in situations
+ * where we restore from snapshot on error (which reverts this change) or
+ * where bitshift would be illegal anyway (e.g. in a class "extends" clause).
+ *
+ * This hack is useful to handle situations like foo<<T>() => void>() where
+ * there can legitimately be two open-angle-brackets in a row in TS.
+ */
+function tsParseTypeArgumentsWithPossibleBitshift() {
+ if (state.type === tt.bitShiftL) {
+ // Re-lex `<<` as a single `<`: back up one position and re-finish the token.
+ state.pos -= 1;
+ finishToken(tt.lessThan);
+ }
+ tsParseTypeArguments();
+}
+
+// Parse `<T, U, ...>` type arguments. The closing `>` needs special handling
+// depending on whether we were already inside a type context.
+function tsParseTypeArguments() {
+ const oldIsType = pushTypeContext(0);
+ expect(tt.lessThan);
+ while (!match(tt.greaterThan) && !state.error) {
+ tsParseType();
+ eat(tt.comma);
+ }
+ if (!oldIsType) {
+ // If the type arguments are present in an expression context, e.g.
+ // f<number>(), then the > sign should be tokenized as a non-type token.
+ // In particular, f(a < b, c >= d) should parse the >= as a single token,
+ // resulting in a syntax error and fallback to the non-type-args
+ // interpretation. In the success case, even though the > is tokenized as a
+ // non-type token, it still must be marked as a type token so that it is
+ // erased.
+ popTypeContext(oldIsType);
+ rescan_gt();
+ expect(tt.greaterThan);
+ state.tokens[state.tokens.length - 1].isType = true;
+ } else {
+ expect(tt.greaterThan);
+ popTypeContext(oldIsType);
+ }
+}
+
+// True if the current identifier is a contextual keyword that can begin a
+// TS-only declaration (`abstract`, `declare`, `enum`, `interface`, ...).
+export function tsIsDeclarationStart() {
+ if (match(tt.name)) {
+ switch (state.contextualKeyword) {
+ case ContextualKeyword._abstract:
+ case ContextualKeyword._declare:
+ case ContextualKeyword._enum:
+ case ContextualKeyword._interface:
+ case ContextualKeyword._module:
+ case ContextualKeyword._namespace:
+ case ContextualKeyword._type:
+ return true;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
+// ======================================================
+// OVERRIDES
+// ======================================================
+
+// Parse an optional return type followed by the function body. A bodiless
+// function (an overload signature) is retroactively marked as type-only so it
+// gets erased, including any leading `export`/`default` tokens.
+export function tsParseFunctionBodyAndFinish(functionStart, funcContextId) {
+ // For arrow functions, `parseArrow` handles the return type itself.
+ if (match(tt.colon)) {
+ tsParseTypeOrTypePredicateAnnotation(tt.colon);
+ }
+
+ // The original code checked the node type to make sure this function type allows a missing
+ // body, but we skip that to avoid sending around the node type. We instead just use the
+ // allowExpressionBody boolean to make sure it's not an arrow function.
+ if (!match(tt.braceL) && isLineTerminator()) {
+ // Retroactively mark the function declaration as a type.
+ let i = state.tokens.length - 1;
+ while (
+ i >= 0 &&
+ (state.tokens[i].start >= functionStart ||
+ state.tokens[i].type === tt._default ||
+ state.tokens[i].type === tt._export)
+ ) {
+ state.tokens[i].isType = true;
+ i--;
+ }
+ return;
+ }
+
+ parseFunctionBody(false, funcContextId);
+}
+
+// TS override for subscript parsing: handles non-null assertions (`x!`),
+// explicit type arguments on calls / tagged templates / instantiation
+// expressions, and optional calls with type arguments (`f?.<T>()`), before
+// delegating to the base subscript parser.
+export function tsParseSubscript(
+ startTokenIndex,
+ noCalls,
+ stopState,
+) {
+ if (!hasPrecedingLineBreak() && eat(tt.bang)) {
+ state.tokens[state.tokens.length - 1].type = tt.nonNullAssertion;
+ return;
+ }
+
+ if (match(tt.lessThan) || match(tt.bitShiftL)) {
+ // There are number of things we are going to "maybe" parse, like type arguments on
+ // tagged template expressions. If any of them fail, walk it back and continue.
+ const snapshot = state.snapshot();
+
+ if (!noCalls && atPossibleAsync()) {
+ // Almost certainly this is a generic async function `async <T>() => ...
+ // But it might be a call with a type argument `async<T>();`
+ const asyncArrowFn = tsTryParseGenericAsyncArrowFunction();
+ if (asyncArrowFn) {
+ return;
+ }
+ }
+ tsParseTypeArgumentsWithPossibleBitshift();
+ if (!noCalls && eat(tt.parenL)) {
+ // With f<T>(), the subscriptStartIndex marker is on the ( token.
+ state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
+ parseCallExpressionArguments();
+ } else if (match(tt.backQuote)) {
+ // Tagged template with a type argument.
+ parseTemplate();
+ } else if (
+ // The remaining possible case is an instantiation expression, e.g.
+ // Array<number> . Check for a few cases that would disqualify it and
+ // cause us to bail out.
+ // a<b>>c is not (a<b>)>c, but a<(b>>c)
+ state.type === tt.greaterThan ||
+ // a<b>c is (a<b)>c
+ (state.type !== tt.parenL &&
+ Boolean(state.type & TokenType.IS_EXPRESSION_START) &&
+ !hasPrecedingLineBreak())
+ ) {
+ // Bail out. We have something like a<b>c, which is not an expression with
+ // type arguments but an (a < b) > c comparison.
+ unexpected();
+ }
+
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ } else {
+ return;
+ }
+ } else if (!noCalls && match(tt.questionDot) && lookaheadType() === tt.lessThan) {
+ // If we see f?.<, then this must be an optional call with a type argument.
+ next();
+ state.tokens[startTokenIndex].isOptionalChainStart = true;
+ // With f?.<T>(), the subscriptStartIndex marker is on the ?. token.
+ state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
+
+ tsParseTypeArguments();
+ expect(tt.parenL);
+ parseCallExpressionArguments();
+ }
+ baseParseSubscript(startTokenIndex, noCalls, stopState);
+}
+
+// Handle TS-specific export forms (`export import`, `export =`,
+// `export as namespace`) and skim past a `type` modifier on re-exports.
+// Returns true if the export was fully handled here.
+export function tsTryParseExport() {
+ if (eat(tt._import)) {
+ // One of these cases:
+ // export import A = B;
+ // export import type A = require("A");
+ if (isContextual(ContextualKeyword._type) && lookaheadType() !== tt.eq) {
+ // Eat a `type` token, unless it's actually an identifier name.
+ expectContextual(ContextualKeyword._type);
+ }
+ tsParseImportEqualsDeclaration();
+ return true;
+ } else if (eat(tt.eq)) {
+ // `export = x;`
+ parseExpression();
+ semicolon();
+ return true;
+ } else if (eatContextual(ContextualKeyword._as)) {
+ // `export as namespace A;`
+ // See `parseNamespaceExportDeclaration` in TypeScript's own parser
+ expectContextual(ContextualKeyword._namespace);
+ parseIdentifier();
+ semicolon();
+ return true;
+ } else {
+ if (isContextual(ContextualKeyword._type)) {
+ const nextType = lookaheadType();
+ // export type {foo} from 'a';
+ // export type * from 'a';
+ // export type * as ns from 'a';
+ if (nextType === tt.braceL || nextType === tt.star) {
+ next();
+ }
+ }
+ return false;
+ }
+}
+
+/**
+ * Parse a TS import specifier, which may be prefixed with "type" and may be of
+ * the form `foo as bar`.
+ *
+ * The number of identifier-like tokens we see happens to be enough to uniquely
+ * identify the form, so simply count the number of identifiers rather than
+ * matching the words `type` or `as`. This is particularly important because
+ * `type` and `as` could each actually be plain identifiers rather than
+ * keywords.
+ */
+export function tsParseImportSpecifier() {
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // import {foo}
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportDeclaration;
+ return;
+ }
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // import {type foo} — both tokens are type-only and get erased.
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportDeclaration;
+ state.tokens[state.tokens.length - 2].isType = true;
+ state.tokens[state.tokens.length - 1].isType = true;
+ return;
+ }
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // import {foo as bar} — `foo` is the remote name, `bar` the local binding.
+ state.tokens[state.tokens.length - 3].identifierRole = IdentifierRole.ImportAccess;
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportDeclaration;
+ return;
+ }
+ parseIdentifier();
+ // import {type foo as bar} — all four tokens are type-only.
+ state.tokens[state.tokens.length - 3].identifierRole = IdentifierRole.ImportAccess;
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportDeclaration;
+ state.tokens[state.tokens.length - 4].isType = true;
+ state.tokens[state.tokens.length - 3].isType = true;
+ state.tokens[state.tokens.length - 2].isType = true;
+ state.tokens[state.tokens.length - 1].isType = true;
+}
+
+/**
+ * Just like named import specifiers, export specifiers can have from 1 to 4
+ * tokens, inclusive, and the number of tokens determines the role of each token.
+ */
+export function tsParseExportSpecifier() {
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // export {foo}
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ExportAccess;
+ return;
+ }
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // export {type foo} — both tokens are type-only and get erased.
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ExportAccess;
+ state.tokens[state.tokens.length - 2].isType = true;
+ state.tokens[state.tokens.length - 1].isType = true;
+ return;
+ }
+ parseIdentifier();
+ if (match(tt.comma) || match(tt.braceR)) {
+ // export {foo as bar} — `foo` is the local name being exported.
+ state.tokens[state.tokens.length - 3].identifierRole = IdentifierRole.ExportAccess;
+ return;
+ }
+ parseIdentifier();
+ // export {type foo as bar} — all four tokens are type-only.
+ state.tokens[state.tokens.length - 3].identifierRole = IdentifierRole.ExportAccess;
+ state.tokens[state.tokens.length - 4].isType = true;
+ state.tokens[state.tokens.length - 3].isType = true;
+ state.tokens[state.tokens.length - 2].isType = true;
+ state.tokens[state.tokens.length - 1].isType = true;
+}
+
+// Handle TS-only `export default` forms: an abstract class or an interface.
+// Returns true if one was parsed.
+export function tsTryParseExportDefaultExpression() {
+ if (isContextual(ContextualKeyword._abstract) && lookaheadType() === tt._class) {
+ state.type = tt._abstract;
+ next(); // Skip "abstract"
+ parseClass(true, true);
+ return true;
+ }
+ if (isContextual(ContextualKeyword._interface)) {
+ // Make sure "export default" are considered type tokens so the whole thing is removed.
+ const oldIsType = pushTypeContext(2);
+ tsParseDeclaration(ContextualKeyword._interface, true);
+ popTypeContext(oldIsType);
+ return true;
+ }
+ return false;
+}
+
+// Handle `const enum ...` at statement level. Returns true if one was parsed.
+export function tsTryParseStatementContent() {
+ if (state.type === tt._const) {
+ const ahead = lookaheadTypeAndKeyword();
+ if (ahead.type === tt.name && ahead.contextualKeyword === ContextualKeyword._enum) {
+ expect(tt._const);
+ expectContextual(ContextualKeyword._enum);
+ state.tokens[state.tokens.length - 1].type = tt._enum;
+ tsParseEnumDeclaration();
+ return true;
+ }
+ }
+ return false;
+}
+
+// Parse TS class-member modifiers, then try an index signature. If an index
+// signature matched, it and all its modifiers are type-only.
+// Returns true if an index signature was parsed.
+export function tsTryParseClassMemberWithIsStatic(isStatic) {
+ const memberStartIndexAfterStatic = state.tokens.length;
+ tsParseModifiers([
+ ContextualKeyword._abstract,
+ ContextualKeyword._readonly,
+ ContextualKeyword._declare,
+ ContextualKeyword._static,
+ ContextualKeyword._override,
+ ]);
+
+ const modifiersEndIndex = state.tokens.length;
+ const found = tsTryParseIndexSignature();
+ if (found) {
+ // Index signatures are type declarations, so set the modifier tokens as
+ // type tokens. Most tokens could be assumed to be type tokens, but `static`
+ // is ambiguous unless we set it explicitly here.
+ const memberStartIndex = isStatic
+ ? memberStartIndexAfterStatic - 1
+ : memberStartIndexAfterStatic;
+ for (let i = memberStartIndex; i < modifiersEndIndex; i++) {
+ state.tokens[i].isType = true;
+ }
+ return true;
+ }
+ return false;
+}
+
+// Note: The reason we do this in `parseIdentifierStatement` and not `parseStatement`
+// is that e.g. `type()` is valid JS, so we must try parsing that first.
+// If it's really a type, we will parse `type` as the statement, and can correct it here
+// by parsing the rest.
+// Finish a statement that began with a contextual-keyword identifier; if it
+// wasn't really a TS declaration, terminate it as a plain expression statement.
+export function tsParseIdentifierStatement(contextualKeyword) {
+ const matched = tsParseExpressionStatement(contextualKeyword);
+ if (!matched) {
+ semicolon();
+ }
+}
+
+// Parse the declaration after `export`, treating `export declare` as type-only
+// so the whole declaration is erased.
+export function tsParseExportDeclaration() {
+ // "export declare" is equivalent to just "export".
+ const isDeclare = eatContextual(ContextualKeyword._declare);
+ if (isDeclare) {
+ state.tokens[state.tokens.length - 1].type = tt._declare;
+ }
+
+ let matchedDeclaration = false;
+ if (match(tt.name)) {
+ if (isDeclare) {
+ // Include both `export` and `declare` in the type context.
+ const oldIsType = pushTypeContext(2);
+ matchedDeclaration = tsTryParseExportDeclaration();
+ popTypeContext(oldIsType);
+ } else {
+ matchedDeclaration = tsTryParseExportDeclaration();
+ }
+ }
+ if (!matchedDeclaration) {
+ if (isDeclare) {
+ const oldIsType = pushTypeContext(2);
+ parseStatement(true);
+ popTypeContext(oldIsType);
+ } else {
+ parseStatement(true);
+ }
+ }
+}
+
+// After a class `extends` clause: parse superclass type arguments and an
+// optional `implements` clause (which is type-only).
+export function tsAfterParseClassSuper(hasSuper) {
+ if (hasSuper && (match(tt.lessThan) || match(tt.bitShiftL))) {
+ tsParseTypeArgumentsWithPossibleBitshift();
+ }
+ if (eatContextual(ContextualKeyword._implements)) {
+ state.tokens[state.tokens.length - 1].type = tt._implements;
+ const oldIsType = pushTypeContext(1);
+ tsParseHeritageClause();
+ popTypeContext(oldIsType);
+ }
+}
+
+// Allow type parameters on an object method, e.g. `{ foo<T>() {} }`.
+export function tsStartParseObjPropValue() {
+ tsTryParseTypeParameters();
+}
+
+// Allow type parameters before a function's parameter list, e.g. `function f<T>()`.
+export function tsStartParseFunctionParams() {
+ tsTryParseTypeParameters();
+}
+
+// `let x: number;`
+// After a variable binding: consume a definite-assignment `!` (same line only)
+// and an optional type annotation, all as type tokens.
+export function tsAfterParseVarHead() {
+ const oldIsType = pushTypeContext(0);
+ if (!hasPrecedingLineBreak()) {
+ eat(tt.bang);
+ }
+ tsTryParseTypeAnnotation();
+ popTypeContext(oldIsType);
+}
+
+// parse the return type of an async arrow function - let foo = (async (): number => {});
+export function tsStartParseAsyncArrowFromCallExpression() {
+ if (match(tt.colon)) {
+ tsParseTypeAnnotation();
+ }
+}
+
+// Returns true if the expression was an arrow function.
+// TS override of parseMaybeAssign; JSX changes how a leading `<` must be
+// disambiguated, so dispatch to the matching variant.
+export function tsParseMaybeAssign(noIn, isWithinParens) {
+ // Note: When the JSX plugin is on, type assertions (`<T> x`) aren't valid syntax.
+ if (isJSXEnabled) {
+ return tsParseMaybeAssignWithJSX(noIn, isWithinParens);
+ } else {
+ return tsParseMaybeAssignWithoutJSX(noIn, isWithinParens);
+ }
+}
+
+// With JSX enabled, a leading `<` is first tried as JSX; on failure we restore
+// the snapshot and reparse it as a type-parameterized arrow function.
+// Returns true if the expression was an arrow function.
+export function tsParseMaybeAssignWithJSX(noIn, isWithinParens) {
+ if (!match(tt.lessThan)) {
+ return baseParseMaybeAssign(noIn, isWithinParens);
+ }
+
+ // Prefer to parse JSX if possible. But may be an arrow fn.
+ const snapshot = state.snapshot();
+ let wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ } else {
+ return wasArrow;
+ }
+
+ // Otherwise, try as type-parameterized arrow function.
+ state.type = tt.typeParameterStart;
+ // This is similar to TypeScript's `tryParseParenthesizedArrowFunctionExpression`.
+ tsParseTypeParameters();
+ wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (!wasArrow) {
+ unexpected();
+ }
+
+ return wasArrow;
+}
+
+// Without JSX, a leading `<` is first tried as a type-parameterized arrow
+// function; on failure we restore the snapshot and reparse as a type assertion.
+// Returns true if the expression was an arrow function.
+export function tsParseMaybeAssignWithoutJSX(noIn, isWithinParens) {
+ if (!match(tt.lessThan)) {
+ return baseParseMaybeAssign(noIn, isWithinParens);
+ }
+
+ const snapshot = state.snapshot();
+ // This is similar to TypeScript's `tryParseParenthesizedArrowFunctionExpression`.
+ tsParseTypeParameters();
+ const wasArrow = baseParseMaybeAssign(noIn, isWithinParens);
+ if (!wasArrow) {
+ unexpected();
+ }
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ } else {
+ return wasArrow;
+ }
+
+ // Try parsing a type cast instead of an arrow function.
+ // This will start with a type assertion (via parseMaybeUnary).
+ // But don't directly call `tsParseTypeAssertion` because we want to handle any binary after it.
+ return baseParseMaybeAssign(noIn, isWithinParens);
+}
+
+// Try to parse an arrow function's return-type annotation followed by `=>`;
+// if that interpretation fails, roll back and just look for a plain `=>`.
+// Returns true if an arrow token was consumed.
+export function tsParseArrow() {
+ if (match(tt.colon)) {
+ // This is different from how the TS parser does it.
+ // TS uses lookahead. Babylon parses it as a parenthesized expression and converts.
+ const snapshot = state.snapshot();
+
+ tsParseTypeOrTypePredicateAnnotation(tt.colon);
+ if (canInsertSemicolon()) unexpected();
+ if (!match(tt.arrow)) unexpected();
+
+ if (state.error) {
+ state.restoreFromSnapshot(snapshot);
+ }
+ }
+ return eat(tt.arrow);
+}
+
+// Allow type annotations inside of a parameter list.
+// Consume an optional `?` modifier and type annotation on a parameter, all as
+// type tokens so they are erased.
+export function tsParseAssignableListItemTypes() {
+ const oldIsType = pushTypeContext(0);
+ eat(tt.question);
+ tsTryParseTypeAnnotation();
+ popTypeContext(oldIsType);
+}
+
+// Allow type arguments on a decorator expression, e.g. `@dec<T>(...)`, before
+// the base decorator-argument parsing.
+export function tsParseMaybeDecoratorArguments() {
+ if (match(tt.lessThan) || match(tt.bitShiftL)) {
+ tsParseTypeArgumentsWithPossibleBitshift();
+ }
+ baseParseMaybeDecoratorArguments();
+}
diff --git a/node_modules/sucrase/dist/esm/parser/tokenizer/index.js b/node_modules/sucrase/dist/esm/parser/tokenizer/index.js
new file mode 100644
index 0000000..69f286a
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/tokenizer/index.js
@@ -0,0 +1,1004 @@
+/* eslint max-len: 0 */
+
+import {input, isFlowEnabled, state} from "../traverser/base";
+import {unexpected} from "../traverser/util";
+import {charCodes} from "../util/charcodes";
+import {IS_IDENTIFIER_CHAR, IS_IDENTIFIER_START} from "../util/identifier";
+import {IS_WHITESPACE, skipWhiteSpace} from "../util/whitespace";
+import {ContextualKeyword} from "./keywords";
+import readWord from "./readWord";
+import { TokenType as tt} from "./types";
+
// Enum describing how an identifier token is used. Built as a TypeScript-style
// numeric enum: each name's position in the list is its value, and both the
// forward (name -> value) and reverse (value -> name) mappings are installed.
export var IdentifierRole; (function (IdentifierRole) {
  [
    "Access",
    "ExportAccess",
    "TopLevelDeclaration",
    "FunctionScopedDeclaration",
    "BlockScopedDeclaration",
    "ObjectShorthandTopLevelDeclaration",
    "ObjectShorthandFunctionScopedDeclaration",
    "ObjectShorthandBlockScopedDeclaration",
    "ObjectShorthand",
    // Any identifier bound in an import statement, e.g. both A and b from
    // `import A, * as b from 'A';`
    "ImportDeclaration",
    "ObjectKey",
    // The `foo` in `import {foo as bar} from "./abc";`.
    "ImportAccess",
  ].forEach((name, value) => {
    IdentifierRole[IdentifierRole[name] = value] = name;
  });
})(IdentifierRole || (IdentifierRole = {}));
+
/**
 * Extra information on jsxTagStart tokens, used to determine which of the three
 * jsx functions are called in the automatic transform.
 */
export var JSXRole; (function (JSXRole) {
  [
    // The element is self-closing or has a body that resolves to empty. We
    // shouldn't emit children at all in this case.
    "NoChildren",
    // The element has a single explicit child, which might still be an arbitrary
    // expression like an array. We should emit that expression as the children.
    "OneChild",
    // The element has at least two explicitly-specified children or has spread
    // children, so child positions are assumed to be "static". We should wrap
    // these children in an array.
    "StaticChildren",
    // The element has a prop named "key" after a prop spread, so we should fall
    // back to the createElement function.
    "KeyAfterPropSpread",
  ].forEach((name, value) => {
    JSXRole[JSXRole[name] = value] = name;
  });
})(JSXRole || (JSXRole = {}));
+
// True if this identifier token introduces a binding of any scope kind,
// including the object-shorthand declaration variants.
export function isDeclaration(token) {
  switch (token.identifierRole) {
    case IdentifierRole.TopLevelDeclaration:
    case IdentifierRole.FunctionScopedDeclaration:
    case IdentifierRole.BlockScopedDeclaration:
    case IdentifierRole.ObjectShorthandTopLevelDeclaration:
    case IdentifierRole.ObjectShorthandFunctionScopedDeclaration:
    case IdentifierRole.ObjectShorthandBlockScopedDeclaration:
      return true;
    default:
      return false;
  }
}
+
// True if this identifier token declares a binding inside a function or block,
// i.e. any declaration role other than the top-level ones.
export function isNonTopLevelDeclaration(token) {
  switch (token.identifierRole) {
    case IdentifierRole.FunctionScopedDeclaration:
    case IdentifierRole.BlockScopedDeclaration:
    case IdentifierRole.ObjectShorthandFunctionScopedDeclaration:
    case IdentifierRole.ObjectShorthandBlockScopedDeclaration:
      return true;
    default:
      return false;
  }
}
+
// True if this identifier token declares a module-level binding (including
// import bindings and top-level object shorthand).
export function isTopLevelDeclaration(token) {
  switch (token.identifierRole) {
    case IdentifierRole.TopLevelDeclaration:
    case IdentifierRole.ObjectShorthandTopLevelDeclaration:
    case IdentifierRole.ImportDeclaration:
      return true;
    default:
      return false;
  }
}
+
// True if this identifier token declares a block-scoped binding.
// Treat top-level declarations as block scope since the distinction doesn't matter here.
export function isBlockScopedDeclaration(token) {
  switch (token.identifierRole) {
    case IdentifierRole.TopLevelDeclaration:
    case IdentifierRole.BlockScopedDeclaration:
    case IdentifierRole.ObjectShorthandTopLevelDeclaration:
    case IdentifierRole.ObjectShorthandBlockScopedDeclaration:
      return true;
    default:
      return false;
  }
}
+
// True if this identifier token declares a function-scoped (var-like) binding.
export function isFunctionScopedDeclaration(token) {
  switch (token.identifierRole) {
    case IdentifierRole.FunctionScopedDeclaration:
    case IdentifierRole.ObjectShorthandFunctionScopedDeclaration:
      return true;
    default:
      return false;
  }
}
+
// True if this identifier token is an object shorthand that also declares a
// binding (e.g. the `a` in `const {a} = obj;`).
export function isObjectShorthandDeclaration(token) {
  switch (token.identifierRole) {
    case IdentifierRole.ObjectShorthandTopLevelDeclaration:
    case IdentifierRole.ObjectShorthandBlockScopedDeclaration:
    case IdentifierRole.ObjectShorthandFunctionScopedDeclaration:
      return true;
    default:
      return false;
  }
}
+
// Object type used to represent tokens. Note that normally, tokens
// simply exist as properties on the parser object. This is only
// used for the onToken callback and the external tokenizer.
export class Token {
  constructor() {
    // Snapshot the tokenizer state's current token fields.
    this.type = state.type;
    this.contextualKeyword = state.contextualKeyword;
    this.start = state.start;
    this.end = state.end;
    this.scopeDepth = state.scopeDepth;
    this.isType = state.isType;
    // Role of this identifier (declaration/access/etc.); filled in later by the parser.
    this.identifierRole = null;
    // For jsxTagStart tokens, which automatic-JSX behavior applies; see JSXRole.
    this.jsxRole = null;
    // Initially false for all tokens, then may be computed in a follow-up step that does scope
    // analysis.
    this.shadowsGlobal = false;
    // Initially false for all tokens, but may be set during transform to mark it as containing an
    // await operation.
    this.isAsyncOperation = false;
    this.contextId = null;
    // For assignments, the index of the RHS. For export tokens, the end of the export.
    this.rhsEndIndex = null;
    // For class tokens, records if the class is a class expression or a class statement.
    this.isExpression = false;
    // Number of times to insert a `nullishCoalesce(` snippet before this token.
    this.numNullishCoalesceStarts = 0;
    // Number of times to insert a `)` snippet after this token.
    this.numNullishCoalesceEnds = 0;
    // If true, insert an `optionalChain([` snippet before this token.
    this.isOptionalChainStart = false;
    // If true, insert a `])` snippet after this token.
    this.isOptionalChainEnd = false;
    // Tag for `.`, `?.`, `[`, `?.[`, `(`, and `?.(` to denote the "root" token for this
    // subscript chain. This can be used to determine if this chain is an optional chain.
    this.subscriptStartIndex = null;
    // Tag for `??` operators to denote the root token for this nullish coalescing call.
    this.nullishStartIndex = null;
  }
}
+
+// ## Tokenizer
+
// Move to the next token: record the current token (Token snapshots the
// tokenizer state) into state.tokens, then advance the tokenizer.
export function next() {
  state.tokens.push(new Token());
  nextToken();
}
+
// Call instead of next when inside a template, since that needs to be handled differently.
export function nextTemplateToken() {
  state.tokens.push(new Token());
  // Set start manually: template text is read directly, without the usual
  // skipSpace pass in nextToken.
  state.start = state.pos;
  readTmplToken();
}
+
// The tokenizer never parses regexes by default. Instead, the parser is responsible for
// instructing it to parse a regex when we see a slash at the start of an expression.
export function retokenizeSlashAsRegex() {
  // A `/=` was tokenized as a compound assignment; back up one position so the
  // `=` is read as part of the regex body instead.
  if (state.type === tt.assign) {
    --state.pos;
  }
  readRegexp();
}
+
// Enter a type context: retroactively mark the most recent
// `existingTokensInType` tokens as type tokens, then set the isType flag.
// Returns the previous flag value to pass to popTypeContext.
export function pushTypeContext(existingTokensInType) {
  const {tokens} = state;
  for (let i = tokens.length - existingTokensInType; i < tokens.length; i++) {
    tokens[i].isType = true;
  }
  const oldIsType = state.isType;
  state.isType = true;
  return oldIsType;
}
+
// Leave a type context by restoring the isType flag saved by pushTypeContext.
export function popTypeContext(oldIsType) {
  state.isType = oldIsType;
}
+
// Consume the current token and return true if it matches `type`; otherwise
// leave the tokenizer untouched and return false.
export function eat(type) {
  if (!match(type)) {
    return false;
  }
  next();
  return true;
}
+
// Consume a token while temporarily forcing type context, so the consumed
// token is marked as a type token; the previous isType flag is then restored.
export function eatTypeToken(tokenType) {
  const oldIsType = state.isType;
  state.isType = true;
  eat(tokenType);
  state.isType = oldIsType;
}
+
// True if the current token has the given type; consumes nothing.
export function match(type) {
  return state.type === type;
}
+
// Return the type of the next token without consuming anything, using
// snapshot/restore to undo the tokenizer side effects of reading ahead.
export function lookaheadType() {
  const snapshot = state.snapshot();
  next();
  const type = state.type;
  state.restoreFromSnapshot(snapshot);
  return type;
}
+
/**
 * Value pair returned by lookaheadTypeAndKeyword: the upcoming token's type
 * together with its contextual keyword (if any).
 */
export class TypeAndKeyword {
  constructor(type, contextualKeyword) {
    this.type = type;
    this.contextualKeyword = contextualKeyword;
  }
}
+
// Like lookaheadType, but also captures the contextual keyword of the next
// token; all tokenizer side effects are rolled back before returning.
export function lookaheadTypeAndKeyword() {
  const snapshot = state.snapshot();
  next();
  const type = state.type;
  const contextualKeyword = state.contextualKeyword;
  state.restoreFromSnapshot(snapshot);
  return new TypeAndKeyword(type, contextualKeyword);
}
+
// Position of the first non-whitespace/non-comment character at or after the
// current position, without advancing the tokenizer.
export function nextTokenStart() {
  return nextTokenStartSince(state.pos);
}
+
// Position of the first non-whitespace/non-comment character at or after
// `pos`. Note: skipWhiteSpace is a shared sticky/global regex, so its
// lastIndex must be set before each use.
export function nextTokenStartSince(pos) {
  skipWhiteSpace.lastIndex = pos;
  const skip = skipWhiteSpace.exec(input);
  return pos + skip[0].length;
}
+
// Char code of the first character of the next token, without tokenizing it.
export function lookaheadCharCode() {
  return input.charCodeAt(nextTokenStart());
}
+
// Read a single token, updating the parser object's token-related
// properties.
export function nextToken() {
  skipSpace();
  state.start = state.pos;
  if (state.pos >= input.length) {
    const tokens = state.tokens;
    // We normally run past the end a bit, but if we're way past the end, avoid an infinite loop.
    // Also check the token positions rather than the types since sometimes we rewrite the token
    // type to something else.
    if (
      tokens.length >= 2 &&
      tokens[tokens.length - 1].start >= input.length &&
      tokens[tokens.length - 2].start >= input.length
    ) {
      unexpected("Unexpectedly reached the end of input.");
    }
    // At end of input, keep emitting eof tokens.
    finishToken(tt.eof);
    return;
  }
  readToken(input.charCodeAt(state.pos));
}
+
// Dispatch on the first character of a token: identifier starts (including
// escape sequences and `@@`, which readWord also consumes) go through the word
// reader; everything else goes to getTokenFromCode.
function readToken(code) {
  // Identifier or keyword. '\uXXXX' sequences are allowed in
  // identifiers, so '\' also dispatches to that.
  if (
    IS_IDENTIFIER_START[code] ||
    code === charCodes.backslash ||
    (code === charCodes.atSign && input.charCodeAt(state.pos + 1) === charCodes.atSign)
  ) {
    readWord();
  } else {
    getTokenFromCode(code);
  }
}
+
// Skip a `/* ... */` comment. Assumes state.pos is just past the opening `/*`;
// leaves state.pos just past the closing `*/`.
function skipBlockComment() {
  while (
    input.charCodeAt(state.pos) !== charCodes.asterisk ||
    input.charCodeAt(state.pos + 1) !== charCodes.slash
  ) {
    state.pos++;
    if (state.pos > input.length) {
      unexpected("Unterminated comment", state.pos - 2);
      return;
    }
  }
  // Step over the closing `*/`.
  state.pos += 2;
}
+
// Skip a line comment. `startSkip` is the length of the comment opener (e.g. 2
// for `//`). Stops at the line terminator, which is left for skipSpace to
// consume as ordinary whitespace.
export function skipLineComment(startSkip) {
  let ch = input.charCodeAt((state.pos += startSkip));
  if (state.pos < input.length) {
    while (
      ch !== charCodes.lineFeed &&
      ch !== charCodes.carriageReturn &&
      ch !== charCodes.lineSeparator &&
      ch !== charCodes.paragraphSeparator &&
      ++state.pos < input.length
    ) {
      ch = input.charCodeAt(state.pos);
    }
  }
}
+
// Called at the start of the parse and after every token. Skips
// whitespace and comments.
export function skipSpace() {
  while (state.pos < input.length) {
    const ch = input.charCodeAt(state.pos);
    switch (ch) {
      case charCodes.carriageReturn:
        // Consume the LF of a CRLF pair together with the CR.
        if (input.charCodeAt(state.pos + 1) === charCodes.lineFeed) {
          ++state.pos;
        }
      // fall through: a CR is then treated like any other line terminator.

      case charCodes.lineFeed:
      case charCodes.lineSeparator:
      case charCodes.paragraphSeparator:
        ++state.pos;
        break;

      case charCodes.slash:
        switch (input.charCodeAt(state.pos + 1)) {
          case charCodes.asterisk:
            state.pos += 2;
            skipBlockComment();
            break;

          case charCodes.slash:
            skipLineComment(2);
            break;

          default:
            // A lone slash starts a token (division or regex), not whitespace.
            return;
        }
        break;

      default:
        if (IS_WHITESPACE[ch]) {
          ++state.pos;
        } else {
          return;
        }
    }
  }
}
+
// Called at the end of every token. Records the token's end position, type,
// and contextual keyword on the tokenizer state. (Whitespace after the token
// is skipped later, at the start of nextToken, so that the next token's
// `start` points at the right position.)
export function finishToken(
  type,
  contextualKeyword = ContextualKeyword.NONE,
) {
  state.end = state.pos;
  state.type = type;
  state.contextualKeyword = contextualKeyword;
}
+
+// ### Token reading
+
+// This is the function that is called to fetch the next token. It
+// is somewhat obscure, because it works in character codes rather
+// than characters, and because operator parsing has been inlined
+// into it.
+//
+// All in the name of speed.
// Tokenize a `.`, which may begin a number (`.5`), a spread/rest `...`, or be
// a plain member-access dot.
function readToken_dot() {
  const nextChar = input.charCodeAt(state.pos + 1);
  // `.5`-style number literal.
  if (nextChar >= charCodes.digit0 && nextChar <= charCodes.digit9) {
    readNumber(true);
    return;
  }
  // `...` spread/rest.
  if (nextChar === charCodes.dot && input.charCodeAt(state.pos + 2) === charCodes.dot) {
    state.pos += 3;
    finishToken(tt.ellipsis);
    return;
  }
  // Plain member-access dot.
  ++state.pos;
  finishToken(tt.dot);
}
+
// Tokenize `/` or `/=`. (Regex literals are handled separately via
// retokenizeSlashAsRegex, at the parser's request.)
function readToken_slash() {
  const isAssign = input.charCodeAt(state.pos + 1) === charCodes.equalsTo;
  finishOp(isAssign ? tt.assign : tt.slash, isAssign ? 2 : 1);
}
+
// Tokenize `*`, `%`, `**`, and their compound-assignment forms (`*=`, `%=`,
// `**=`).
function readToken_mult_modulo(code) {
  // '%*'
  let tokenType = code === charCodes.asterisk ? tt.star : tt.modulo;
  let width = 1;
  let nextChar = input.charCodeAt(state.pos + 1);

  // Exponentiation operator **
  if (code === charCodes.asterisk && nextChar === charCodes.asterisk) {
    width++;
    nextChar = input.charCodeAt(state.pos + 2);
    tokenType = tt.exponent;
  }

  // Match *= or %=, disallowing *=> which can be valid in flow.
  if (
    nextChar === charCodes.equalsTo &&
    input.charCodeAt(state.pos + 2) !== charCodes.greaterThan
  ) {
    width++;
    tokenType = tt.assign;
  }

  finishOp(tokenType, width);
}
+
// Tokenize the `|`/`&` family: `||`, `&&`, `||=`, `&&=`, `|=`, `&=`, the
// pipeline operator `|>`, Flow's exact-object `|}`, and the single-character
// bitwise operators.
function readToken_pipe_amp(code) {
  // '|&'
  const nextChar = input.charCodeAt(state.pos + 1);

  if (nextChar === code) {
    if (input.charCodeAt(state.pos + 2) === charCodes.equalsTo) {
      // ||= or &&=
      finishOp(tt.assign, 3);
    } else {
      // || or &&
      finishOp(code === charCodes.verticalBar ? tt.logicalOR : tt.logicalAND, 2);
    }
    return;
  }

  if (code === charCodes.verticalBar) {
    // '|>'
    if (nextChar === charCodes.greaterThan) {
      finishOp(tt.pipeline, 2);
      return;
    } else if (nextChar === charCodes.rightCurlyBrace && isFlowEnabled) {
      // '|}'
      finishOp(tt.braceBarR, 2);
      return;
    }
  }

  if (nextChar === charCodes.equalsTo) {
    // |= or &=
    finishOp(tt.assign, 2);
    return;
  }

  finishOp(code === charCodes.verticalBar ? tt.bitwiseOR : tt.bitwiseAND, 1);
}
+
// Tokenize `^` or `^=`.
function readToken_caret() {
  const isAssign = input.charCodeAt(state.pos + 1) === charCodes.equalsTo;
  finishOp(isAssign ? tt.assign : tt.bitwiseXOR, isAssign ? 2 : 1);
}
+
// Tokenize `+`/`-`, including `++`/`--` and `+=`/`-=`.
function readToken_plus_min(code) {
  const nextChar = input.charCodeAt(state.pos + 1);

  if (nextChar === code) {
    // `++` or `--`. Tentatively call this a prefix operator, but it might be
    // changed to postfix later.
    finishOp(tt.preIncDec, 2);
    return;
  }

  if (nextChar === charCodes.equalsTo) {
    // `+=` or `-=`.
    finishOp(tt.assign, 2);
    return;
  }

  finishOp(code === charCodes.plusSign ? tt.plus : tt.minus, 1);
}
+
// Tokenize `<`, `<=`, `<<`, and `<<=`, taking special care with `<<` in and
// around type contexts (see inline comments).
function readToken_lt() {
  const nextChar = input.charCodeAt(state.pos + 1);

  if (nextChar === charCodes.lessThan) {
    if (input.charCodeAt(state.pos + 2) === charCodes.equalsTo) {
      // `<<=`
      finishOp(tt.assign, 3);
      return;
    }
    // We see <<, but need to be really careful about whether to treat it as a
    // true left-shift or as two < tokens.
    if (state.isType) {
      // Within a type, << might come up in a snippet like `Array<<T>() => void>`,
      // so treat it as two < tokens. Importantly, this should only override <<
      // rather than other tokens like <= . If we treated <= as < in a type
      // context, then the snippet `a as T <= 1` would incorrectly start parsing
      // a type argument on T. We don't need to worry about `a as T << 1`
      // because TypeScript disallows that syntax.
      finishOp(tt.lessThan, 1);
    } else {
      // Outside a type, this might be a true left-shift operator, or it might
      // still be two open-type-arg tokens, such as in `f<<T>() => void>()`. We
      // look at the token while considering the `f`, so we don't yet know that
      // we're in a type context. In this case, we initially tokenize as a
      // left-shift and correct after-the-fact as necessary in
      // tsParseTypeArgumentsWithPossibleBitshift .
      finishOp(tt.bitShiftL, 2);
    }
    return;
  }

  if (nextChar === charCodes.equalsTo) {
    // <=
    finishOp(tt.relationalOrEqual, 2);
  } else {
    finishOp(tt.lessThan, 1);
  }
}
+
// Tokenize `>`, `>=`, `>>`, `>>>`, `>>=`, `>>>=`. In type contexts, always
// emit a single `>` so nested generics close correctly.
function readToken_gt() {
  if (state.isType) {
    // Avoid right-shift for things like `Array<Array<string>>` and
    // greater-than-or-equal for things like `const a: Array<number>=[];`.
    finishOp(tt.greaterThan, 1);
    return;
  }

  const nextChar = input.charCodeAt(state.pos + 1);

  if (nextChar === charCodes.greaterThan) {
    // `>>` or `>>>`, possibly followed by `=` for the assignment forms.
    const size = input.charCodeAt(state.pos + 2) === charCodes.greaterThan ? 3 : 2;
    if (input.charCodeAt(state.pos + size) === charCodes.equalsTo) {
      finishOp(tt.assign, size + 1);
      return;
    }
    finishOp(tt.bitShiftR, size);
    return;
  }

  if (nextChar === charCodes.equalsTo) {
    // >=
    finishOp(tt.relationalOrEqual, 2);
  } else {
    finishOp(tt.greaterThan, 1);
  }
}
+
/**
 * Reinterpret a possible > token when transitioning from a type to a non-type
 * context.
 *
 * This comes up in two situations where >= needs to be treated as one token:
 * - After an `as` expression, like in the code `a as T >= 1`.
 * - In a type argument in an expression context, e.g. `f(a < b, c >= d)`, we
 *   need to see the token as >= so that we get an error and backtrack to
 *   normal expression parsing.
 *
 * Other situations require >= to be seen as two tokens, e.g.
 * `const x: Array<T>=[];`, so it's important to treat > as its own token in
 * typical type parsing situations.
 */
export function rescan_gt() {
  if (state.type === tt.greaterThan) {
    // Back up over the single `>` and re-tokenize it with the (non-type)
    // multi-character rules.
    state.pos -= 1;
    readToken_gt();
  }
}
+
// Tokenize `=`/`!` and their longer forms: `==`, `===`, `!=`, `!==`, and the
// arrow `=>`.
function readToken_eq_excl(code) {
  // '=!'
  const nextChar = input.charCodeAt(state.pos + 1);
  if (nextChar === charCodes.equalsTo) {
    // `==`/`!=`, or with a third `=`, `===`/`!==`.
    finishOp(tt.equality, input.charCodeAt(state.pos + 2) === charCodes.equalsTo ? 3 : 2);
    return;
  }
  if (code === charCodes.equalsTo && nextChar === charCodes.greaterThan) {
    // '=>'
    state.pos += 2;
    finishToken(tt.arrow);
    return;
  }
  finishOp(code === charCodes.equalsTo ? tt.eq : tt.bang, 1);
}
+
// Tokenize `?` and its longer forms: `??`, `??=`, and optional chaining `?.`.
function readToken_question() {
  // '?'
  const nextChar = input.charCodeAt(state.pos + 1);
  const nextChar2 = input.charCodeAt(state.pos + 2);
  if (
    nextChar === charCodes.questionMark &&
    // In Flow (but not TypeScript), ??string is a valid type that should be
    // tokenized as two individual ? tokens.
    !(isFlowEnabled && state.isType)
  ) {
    if (nextChar2 === charCodes.equalsTo) {
      // '??='
      finishOp(tt.assign, 3);
    } else {
      // '??'
      finishOp(tt.nullishCoalescing, 2);
    }
  } else if (
    nextChar === charCodes.dot &&
    !(nextChar2 >= charCodes.digit0 && nextChar2 <= charCodes.digit9)
  ) {
    // '.' not followed by a number: optional chaining `?.`. (A digit means a
    // conditional like `a ? .5 : b`.)
    state.pos += 2;
    finishToken(tt.questionDot);
  } else {
    ++state.pos;
    finishToken(tt.question);
  }
}
+
/**
 * Tokenize a single punctuator, operator, number, or string starting at the
 * character `code`. Identifier-start characters never reach here (readToken
 * routes them to readWord). Anything unhandled reports an unexpected-character
 * error.
 */
export function getTokenFromCode(code) {
  switch (code) {
    case charCodes.numberSign:
      ++state.pos;
      finishToken(tt.hash);
      return;

    // The interpretation of a dot depends on whether it is followed
    // by a digit or another two dots.

    case charCodes.dot:
      readToken_dot();
      return;

    // Punctuation tokens.
    case charCodes.leftParenthesis:
      ++state.pos;
      finishToken(tt.parenL);
      return;
    case charCodes.rightParenthesis:
      ++state.pos;
      finishToken(tt.parenR);
      return;
    case charCodes.semicolon:
      ++state.pos;
      finishToken(tt.semi);
      return;
    case charCodes.comma:
      ++state.pos;
      finishToken(tt.comma);
      return;
    case charCodes.leftSquareBracket:
      ++state.pos;
      finishToken(tt.bracketL);
      return;
    case charCodes.rightSquareBracket:
      ++state.pos;
      finishToken(tt.bracketR);
      return;

    case charCodes.leftCurlyBrace:
      // Flow's exact-object open brace `{|`.
      if (isFlowEnabled && input.charCodeAt(state.pos + 1) === charCodes.verticalBar) {
        finishOp(tt.braceBarL, 2);
      } else {
        ++state.pos;
        finishToken(tt.braceL);
      }
      return;

    case charCodes.rightCurlyBrace:
      ++state.pos;
      finishToken(tt.braceR);
      return;

    case charCodes.colon:
      if (input.charCodeAt(state.pos + 1) === charCodes.colon) {
        finishOp(tt.doubleColon, 2);
      } else {
        ++state.pos;
        finishToken(tt.colon);
      }
      return;

    case charCodes.questionMark:
      readToken_question();
      return;
    case charCodes.atSign:
      ++state.pos;
      finishToken(tt.at);
      return;

    case charCodes.graveAccent:
      ++state.pos;
      finishToken(tt.backQuote);
      return;

    case charCodes.digit0: {
      const nextChar = input.charCodeAt(state.pos + 1);
      // '0x', '0X', '0o', '0O', '0b', '0B'
      if (
        nextChar === charCodes.lowercaseX ||
        nextChar === charCodes.uppercaseX ||
        nextChar === charCodes.lowercaseO ||
        nextChar === charCodes.uppercaseO ||
        nextChar === charCodes.lowercaseB ||
        nextChar === charCodes.uppercaseB
      ) {
        readRadixNumber();
        return;
      }
    }
    // Fall through: anything else beginning with a digit is an integer, octal
    // number, or float.
    case charCodes.digit1:
    case charCodes.digit2:
    case charCodes.digit3:
    case charCodes.digit4:
    case charCodes.digit5:
    case charCodes.digit6:
    case charCodes.digit7:
    case charCodes.digit8:
    case charCodes.digit9:
      readNumber(false);
      return;

    // Quotes produce strings.
    case charCodes.quotationMark:
    case charCodes.apostrophe:
      readString(code);
      return;

    // Operators are parsed inline in tiny state machines. '=' (charCodes.equalsTo) is
    // often referred to. `finishOp` simply skips the amount of
    // characters it is given as second argument, and returns a token
    // of the type given by its first argument.

    case charCodes.slash:
      readToken_slash();
      return;

    case charCodes.percentSign:
    case charCodes.asterisk:
      readToken_mult_modulo(code);
      return;

    case charCodes.verticalBar:
    case charCodes.ampersand:
      readToken_pipe_amp(code);
      return;

    case charCodes.caret:
      readToken_caret();
      return;

    case charCodes.plusSign:
    case charCodes.dash:
      readToken_plus_min(code);
      return;

    case charCodes.lessThan:
      readToken_lt();
      return;

    case charCodes.greaterThan:
      readToken_gt();
      return;

    case charCodes.equalsTo:
    case charCodes.exclamationMark:
      readToken_eq_excl(code);
      return;

    case charCodes.tilde:
      finishOp(tt.tilde, 1);
      return;

    default:
      break;
  }

  unexpected(`Unexpected character '${String.fromCharCode(code)}'`, state.pos);
}
+
// Advance past `size` characters and finish a token of the given type.
function finishOp(type, size) {
  state.pos += size;
  finishToken(type);
}
+
// Read a regex literal. Assumes the opening `/` has already been consumed;
// scans to the matching unescaped `/` — honoring character classes, since a
// `/` inside `[...]` does not terminate the regex — then consumes the flags.
function readRegexp() {
  const start = state.pos;
  let escaped = false;
  let inClass = false;
  for (;;) {
    if (state.pos >= input.length) {
      unexpected("Unterminated regular expression", start);
      return;
    }
    const code = input.charCodeAt(state.pos);
    if (escaped) {
      // Previous character was a backslash; this character is literal.
      escaped = false;
    } else {
      if (code === charCodes.leftSquareBracket) {
        inClass = true;
      } else if (code === charCodes.rightSquareBracket && inClass) {
        inClass = false;
      } else if (code === charCodes.slash && !inClass) {
        break;
      }
      escaped = code === charCodes.backslash;
    }
    ++state.pos;
  }
  ++state.pos;
  // Need to use `skipWord` because '\uXXXX' sequences are allowed here (don't ask).
  skipWord();

  finishToken(tt.regexp);
}
+
/**
 * Read a decimal integer. Note that this can't be unified with the similar code
 * in readRadixNumber (which also handles hex digits) because "e" needs to be
 * the end of the integer so that we can properly handle scientific notation.
 */
function readInt() {
  // Consume decimal digits and numeric-separator underscores.
  let code = input.charCodeAt(state.pos);
  while (
    (code >= charCodes.digit0 && code <= charCodes.digit9) ||
    code === charCodes.underscore
  ) {
    state.pos++;
    code = input.charCodeAt(state.pos);
  }
}
+
// Read a number with a radix prefix (0x/0X, 0o/0O, 0b/0B), possibly with a
// BigInt `n` suffix. Deliberately permissive: accepts any hex digit or
// underscore for all radixes rather than validating digits per radix.
function readRadixNumber() {
  state.pos += 2; // 0x

  // Walk to the end of the number, allowing hex digits.
  while (true) {
    const code = input.charCodeAt(state.pos);
    if (
      (code >= charCodes.digit0 && code <= charCodes.digit9) ||
      (code >= charCodes.lowercaseA && code <= charCodes.lowercaseF) ||
      (code >= charCodes.uppercaseA && code <= charCodes.uppercaseF) ||
      code === charCodes.underscore
    ) {
      state.pos++;
    } else {
      break;
    }
  }

  const nextChar = input.charCodeAt(state.pos);
  if (nextChar === charCodes.lowercaseN) {
    // BigInt suffix, e.g. `0xFFn`.
    ++state.pos;
    finishToken(tt.bigint);
  } else {
    finishToken(tt.num);
  }
}
+
// Read an integer, octal integer, or floating-point number. `startsWithDot`
// is true when called from readToken_dot for a literal like `.5`.
function readNumber(startsWithDot) {
  let isBigInt = false;
  let isDecimal = false;

  // Integer part (already past the dot when startsWithDot).
  if (!startsWithDot) {
    readInt();
  }

  // Fractional part.
  let nextChar = input.charCodeAt(state.pos);
  if (nextChar === charCodes.dot) {
    ++state.pos;
    readInt();
    nextChar = input.charCodeAt(state.pos);
  }

  // Scientific notation, e.g. `1e-9` or `2E+10`.
  if (nextChar === charCodes.uppercaseE || nextChar === charCodes.lowercaseE) {
    nextChar = input.charCodeAt(++state.pos);
    if (nextChar === charCodes.plusSign || nextChar === charCodes.dash) {
      ++state.pos;
    }
    readInt();
    nextChar = input.charCodeAt(state.pos);
  }

  // Suffixes: `n` marks a BigInt, `m` a decimal literal.
  if (nextChar === charCodes.lowercaseN) {
    ++state.pos;
    isBigInt = true;
  } else if (nextChar === charCodes.lowercaseM) {
    ++state.pos;
    isDecimal = true;
  }

  if (isBigInt) {
    finishToken(tt.bigint);
    return;
  }

  if (isDecimal) {
    finishToken(tt.decimal);
    return;
  }

  finishToken(tt.num);
}
+
// Read a string literal delimited by `quote` (single or double quote). A
// backslash skips the following character, so escaped quotes don't terminate
// the string.
function readString(quote) {
  state.pos++;
  for (;;) {
    if (state.pos >= input.length) {
      unexpected("Unterminated string constant");
      return;
    }
    const ch = input.charCodeAt(state.pos);
    if (ch === charCodes.backslash) {
      // Skip over the escaped character.
      state.pos++;
    } else if (ch === quote) {
      break;
    }
    state.pos++;
  }
  // Step past the closing quote.
  state.pos++;
  finishToken(tt.string);
}
+
// Reads template string tokens.
function readTmplToken() {
  for (;;) {
    if (state.pos >= input.length) {
      unexpected("Unterminated template");
      return;
    }
    const ch = input.charCodeAt(state.pos);
    if (
      ch === charCodes.graveAccent ||
      (ch === charCodes.dollarSign && input.charCodeAt(state.pos + 1) === charCodes.leftCurlyBrace)
    ) {
      if (state.pos === state.start && match(tt.template)) {
        // No template text before the delimiter: emit the `${` or closing
        // backtick itself as the token.
        if (ch === charCodes.dollarSign) {
          state.pos += 2;
          finishToken(tt.dollarBraceL);
          return;
        } else {
          ++state.pos;
          finishToken(tt.backQuote);
          return;
        }
      }
      // Emit the accumulated template text; the delimiter is tokenized on the
      // next call.
      finishToken(tt.template);
      return;
    }
    if (ch === charCodes.backslash) {
      // Skip over the escaped character.
      state.pos++;
    }
    state.pos++;
  }
}
+
// Skip to the end of the current word. Note that this is the same as the snippet at the end of
// readWord, but calling skipWord from readWord seems to slightly hurt performance from some rough
// measurements.
export function skipWord() {
  while (state.pos < input.length) {
    const ch = input.charCodeAt(state.pos);
    if (IS_IDENTIFIER_CHAR[ch]) {
      state.pos++;
    } else if (ch === charCodes.backslash) {
      // \u
      state.pos += 2;
      if (input.charCodeAt(state.pos) === charCodes.leftCurlyBrace) {
        // `\u{XXXX}`-style escape: skip to just past the closing brace.
        while (
          state.pos < input.length &&
          input.charCodeAt(state.pos) !== charCodes.rightCurlyBrace
        ) {
          state.pos++;
        }
        state.pos++;
      }
    } else {
      break;
    }
  }
}
diff --git a/node_modules/sucrase/dist/esm/parser/tokenizer/keywords.js b/node_modules/sucrase/dist/esm/parser/tokenizer/keywords.js
new file mode 100644
index 0000000..0dcf1b0
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/tokenizer/keywords.js
@@ -0,0 +1,43 @@
// Enum of contextual keywords (identifiers with special meaning only in some
// positions). Built as a TypeScript-style numeric enum: each name's position
// in the list is its value, and both the forward (name -> value) and reverse
// (value -> name) mappings are installed. NONE is first so it is value 0.
export var ContextualKeyword; (function (ContextualKeyword) {
  [
    "NONE",
    "_abstract",
    "_accessor",
    "_as",
    "_assert",
    "_asserts",
    "_async",
    "_await",
    "_checks",
    "_constructor",
    "_declare",
    "_enum",
    "_exports",
    "_from",
    "_get",
    "_global",
    "_implements",
    "_infer",
    "_interface",
    "_is",
    "_keyof",
    "_mixins",
    "_module",
    "_namespace",
    "_of",
    "_opaque",
    "_out",
    "_override",
    "_private",
    "_protected",
    "_proto",
    "_public",
    "_readonly",
    "_require",
    "_satisfies",
    "_set",
    "_static",
    "_symbol",
    "_type",
    "_unique",
    "_using",
  ].forEach((name, value) => {
    ContextualKeyword[ContextualKeyword[name] = value] = name;
  });
})(ContextualKeyword || (ContextualKeyword = {}));
diff --git a/node_modules/sucrase/dist/esm/parser/tokenizer/readWord.js b/node_modules/sucrase/dist/esm/parser/tokenizer/readWord.js
new file mode 100644
index 0000000..cf3df89
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/tokenizer/readWord.js
@@ -0,0 +1,64 @@
+import {input, state} from "../traverser/base";
+import {charCodes} from "../util/charcodes";
+import {IS_IDENTIFIER_CHAR} from "../util/identifier";
+import {finishToken} from "./index";
+import {READ_WORD_TREE} from "./readWordTree";
+import {TokenType as tt} from "./types";
+
+/**
+ * Read an identifier, producing either a name token or matching on one of the existing keywords.
+ * For performance, we pre-generate big decision tree that we traverse. Each node represents a
+ * prefix and has 27 values, where the first value is the token or contextual token, if any (-1 if
+ * not), and the other 26 values are the transitions to other nodes, or -1 to stop.
+ */
+export default function readWord() {
+ let treePos = 0;
+ let code = 0;
+ let pos = state.pos;
+ while (pos < input.length) {
+ code = input.charCodeAt(pos);
+ if (code < charCodes.lowercaseA || code > charCodes.lowercaseZ) {
+ break;
+ }
+ const next = READ_WORD_TREE[treePos + (code - charCodes.lowercaseA) + 1];
+ if (next === -1) {
+ break;
+ } else {
+ treePos = next;
+ pos++;
+ }
+ }
+
+ const keywordValue = READ_WORD_TREE[treePos];
+ if (keywordValue > -1 && !IS_IDENTIFIER_CHAR[code]) {
+ state.pos = pos;
+ if (keywordValue & 1) {
+ finishToken(keywordValue >>> 1);
+ } else {
+ finishToken(tt.name, keywordValue >>> 1);
+ }
+ return;
+ }
+
+ while (pos < input.length) {
+ const ch = input.charCodeAt(pos);
+ if (IS_IDENTIFIER_CHAR[ch]) {
+ pos++;
+ } else if (ch === charCodes.backslash) {
+ // \u
+ pos += 2;
+ if (input.charCodeAt(pos) === charCodes.leftCurlyBrace) {
+ while (pos < input.length && input.charCodeAt(pos) !== charCodes.rightCurlyBrace) {
+ pos++;
+ }
+ pos++;
+ }
+ } else if (ch === charCodes.atSign && input.charCodeAt(pos + 1) === charCodes.atSign) {
+ pos += 2;
+ } else {
+ break;
+ }
+ }
+ state.pos = pos;
+ finishToken(tt.name);
+}
diff --git a/node_modules/sucrase/dist/esm/parser/tokenizer/readWordTree.js b/node_modules/sucrase/dist/esm/parser/tokenizer/readWordTree.js
new file mode 100644
index 0000000..ffb8cac
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/tokenizer/readWordTree.js
@@ -0,0 +1,671 @@
+// Generated file, do not edit! Run "yarn generate" to re-generate this file.
+import {ContextualKeyword} from "./keywords";
+import {TokenType as tt} from "./types";
+
+// prettier-ignore
+export const READ_WORD_TREE = new Int32Array([
+ // ""
+ -1, 27, 783, 918, 1755, 2376, 2862, 3483, -1, 3699, -1, 4617, 4752, 4833, 5130, 5508, 5940, -1, 6480, 6939, 7749, 8181, 8451, 8613, -1, 8829, -1,
+ // "a"
+ -1, -1, 54, 243, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 432, -1, -1, -1, 675, -1, -1, -1,
+ // "ab"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 81, -1, -1, -1, -1, -1, -1, -1,
+ // "abs"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 108, -1, -1, -1, -1, -1, -1,
+ // "abst"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 135, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "abstr"
+ -1, 162, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "abstra"
+ -1, -1, -1, 189, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "abstrac"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 216, -1, -1, -1, -1, -1, -1,
+ // "abstract"
+ ContextualKeyword._abstract << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ac"
+ -1, -1, -1, 270, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "acc"
+ -1, -1, -1, -1, -1, 297, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "acce"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 324, -1, -1, -1, -1, -1, -1, -1,
+ // "acces"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 351, -1, -1, -1, -1, -1, -1, -1,
+ // "access"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 378, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "accesso"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 405, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "accessor"
+ ContextualKeyword._accessor << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "as"
+ ContextualKeyword._as << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 459, -1, -1, -1, -1, -1, 594, -1,
+ // "ass"
+ -1, -1, -1, -1, -1, 486, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "asse"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 513, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "asser"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 540, -1, -1, -1, -1, -1, -1,
+ // "assert"
+ ContextualKeyword._assert << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 567, -1, -1, -1, -1, -1, -1, -1,
+ // "asserts"
+ ContextualKeyword._asserts << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "asy"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 621, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "asyn"
+ -1, -1, -1, 648, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "async"
+ ContextualKeyword._async << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "aw"
+ -1, 702, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "awa"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 729, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "awai"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 756, -1, -1, -1, -1, -1, -1,
+ // "await"
+ ContextualKeyword._await << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "b"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 810, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "br"
+ -1, -1, -1, -1, -1, 837, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "bre"
+ -1, 864, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "brea"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 891, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "break"
+ (tt._break << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "c"
+ -1, 945, -1, -1, -1, -1, -1, -1, 1107, -1, -1, -1, 1242, -1, -1, 1350, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ca"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 972, 1026, -1, -1, -1, -1, -1, -1,
+ // "cas"
+ -1, -1, -1, -1, -1, 999, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "case"
+ (tt._case << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "cat"
+ -1, -1, -1, 1053, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "catc"
+ -1, -1, -1, -1, -1, -1, -1, -1, 1080, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "catch"
+ (tt._catch << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ch"
+ -1, -1, -1, -1, -1, 1134, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "che"
+ -1, -1, -1, 1161, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "chec"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1188, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "check"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1215, -1, -1, -1, -1, -1, -1, -1,
+ // "checks"
+ ContextualKeyword._checks << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "cl"
+ -1, 1269, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "cla"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1296, -1, -1, -1, -1, -1, -1, -1,
+ // "clas"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1323, -1, -1, -1, -1, -1, -1, -1,
+ // "class"
+ (tt._class << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "co"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1377, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "con"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1404, 1620, -1, -1, -1, -1, -1, -1,
+ // "cons"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1431, -1, -1, -1, -1, -1, -1,
+ // "const"
+ (tt._const << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1458, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "constr"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1485, -1, -1, -1, -1, -1,
+ // "constru"
+ -1, -1, -1, 1512, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "construc"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1539, -1, -1, -1, -1, -1, -1,
+ // "construct"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1566, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "constructo"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1593, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "constructor"
+ ContextualKeyword._constructor << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "cont"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 1647, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "conti"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1674, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "contin"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1701, -1, -1, -1, -1, -1,
+ // "continu"
+ -1, -1, -1, -1, -1, 1728, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "continue"
+ (tt._continue << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "d"
+ -1, -1, -1, -1, -1, 1782, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2349, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "de"
+ -1, -1, 1809, 1971, -1, -1, 2106, -1, -1, -1, -1, -1, 2241, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "deb"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1836, -1, -1, -1, -1, -1,
+ // "debu"
+ -1, -1, -1, -1, -1, -1, -1, 1863, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "debug"
+ -1, -1, -1, -1, -1, -1, -1, 1890, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "debugg"
+ -1, -1, -1, -1, -1, 1917, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "debugge"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1944, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "debugger"
+ (tt._debugger << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "dec"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1998, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "decl"
+ -1, 2025, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "decla"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2052, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "declar"
+ -1, -1, -1, -1, -1, 2079, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "declare"
+ ContextualKeyword._declare << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "def"
+ -1, 2133, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "defa"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2160, -1, -1, -1, -1, -1,
+ // "defau"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2187, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "defaul"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2214, -1, -1, -1, -1, -1, -1,
+ // "default"
+ (tt._default << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "del"
+ -1, -1, -1, -1, -1, 2268, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "dele"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2295, -1, -1, -1, -1, -1, -1,
+ // "delet"
+ -1, -1, -1, -1, -1, 2322, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "delete"
+ (tt._delete << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "do"
+ (tt._do << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "e"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2403, -1, 2484, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2565, -1, -1,
+ // "el"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2430, -1, -1, -1, -1, -1, -1, -1,
+ // "els"
+ -1, -1, -1, -1, -1, 2457, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "else"
+ (tt._else << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "en"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2511, -1, -1, -1, -1, -1,
+ // "enu"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2538, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "enum"
+ ContextualKeyword._enum << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ex"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2592, -1, -1, -1, 2727, -1, -1, -1, -1, -1, -1,
+ // "exp"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2619, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "expo"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2646, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "expor"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2673, -1, -1, -1, -1, -1, -1,
+ // "export"
+ (tt._export << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2700, -1, -1, -1, -1, -1, -1, -1,
+ // "exports"
+ ContextualKeyword._exports << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ext"
+ -1, -1, -1, -1, -1, 2754, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "exte"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2781, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "exten"
+ -1, -1, -1, -1, 2808, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "extend"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2835, -1, -1, -1, -1, -1, -1, -1,
+ // "extends"
+ (tt._extends << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "f"
+ -1, 2889, -1, -1, -1, -1, -1, -1, -1, 2997, -1, -1, -1, -1, -1, 3159, -1, -1, 3213, -1, -1, 3294, -1, -1, -1, -1, -1,
+ // "fa"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2916, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "fal"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2943, -1, -1, -1, -1, -1, -1, -1,
+ // "fals"
+ -1, -1, -1, -1, -1, 2970, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "false"
+ (tt._false << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "fi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3024, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "fin"
+ -1, 3051, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "fina"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3078, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "final"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3105, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "finall"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3132, -1,
+ // "finally"
+ (tt._finally << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "fo"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3186, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "for"
+ (tt._for << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "fr"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "fro"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3267, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "from"
+ ContextualKeyword._from << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "fu"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3321, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "fun"
+ -1, -1, -1, 3348, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "func"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3375, -1, -1, -1, -1, -1, -1,
+ // "funct"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 3402, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "functi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3429, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "functio"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3456, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "function"
+ (tt._function << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "g"
+ -1, -1, -1, -1, -1, 3510, -1, -1, -1, -1, -1, -1, 3564, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ge"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3537, -1, -1, -1, -1, -1, -1,
+ // "get"
+ ContextualKeyword._get << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "gl"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3591, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "glo"
+ -1, -1, 3618, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "glob"
+ -1, 3645, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "globa"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3672, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "global"
+ ContextualKeyword._global << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "i"
+ -1, -1, -1, -1, -1, -1, 3726, -1, -1, -1, -1, -1, -1, 3753, 4077, -1, -1, -1, -1, 4590, -1, -1, -1, -1, -1, -1, -1,
+ // "if"
+ (tt._if << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "im"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3780, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "imp"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3807, -1, -1, 3996, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "impl"
+ -1, -1, -1, -1, -1, 3834, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "imple"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3861, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "implem"
+ -1, -1, -1, -1, -1, 3888, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "impleme"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3915, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "implemen"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3942, -1, -1, -1, -1, -1, -1,
+ // "implement"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3969, -1, -1, -1, -1, -1, -1, -1,
+ // "implements"
+ ContextualKeyword._implements << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "impo"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4023, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "impor"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4050, -1, -1, -1, -1, -1, -1,
+ // "import"
+ (tt._import << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "in"
+ (tt._in << 1) + 1, -1, -1, -1, -1, -1, 4104, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4185, 4401, -1, -1, -1, -1, -1, -1,
+ // "inf"
+ -1, -1, -1, -1, -1, 4131, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "infe"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4158, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "infer"
+ ContextualKeyword._infer << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ins"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4212, -1, -1, -1, -1, -1, -1,
+ // "inst"
+ -1, 4239, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "insta"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4266, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "instan"
+ -1, -1, -1, 4293, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "instanc"
+ -1, -1, -1, -1, -1, 4320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "instance"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4347, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "instanceo"
+ -1, -1, -1, -1, -1, -1, 4374, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "instanceof"
+ (tt._instanceof << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "int"
+ -1, -1, -1, -1, -1, 4428, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "inte"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4455, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "inter"
+ -1, -1, -1, -1, -1, -1, 4482, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "interf"
+ -1, 4509, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "interfa"
+ -1, -1, -1, 4536, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "interfac"
+ -1, -1, -1, -1, -1, 4563, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "interface"
+ ContextualKeyword._interface << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "is"
+ ContextualKeyword._is << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "k"
+ -1, -1, -1, -1, -1, 4644, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ke"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4671, -1,
+ // "key"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4698, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "keyo"
+ -1, -1, -1, -1, -1, -1, 4725, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "keyof"
+ ContextualKeyword._keyof << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "l"
+ -1, -1, -1, -1, -1, 4779, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "le"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4806, -1, -1, -1, -1, -1, -1,
+ // "let"
+ (tt._let << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "m"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 4860, -1, -1, -1, -1, -1, 4995, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "mi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4887, -1, -1,
+ // "mix"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 4914, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "mixi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4941, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "mixin"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4968, -1, -1, -1, -1, -1, -1, -1,
+ // "mixins"
+ ContextualKeyword._mixins << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "mo"
+ -1, -1, -1, -1, 5022, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "mod"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5049, -1, -1, -1, -1, -1,
+ // "modu"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5076, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "modul"
+ -1, -1, -1, -1, -1, 5103, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "module"
+ ContextualKeyword._module << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "n"
+ -1, 5157, -1, -1, -1, 5373, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5427, -1, -1, -1, -1, -1,
+ // "na"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5184, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "nam"
+ -1, -1, -1, -1, -1, 5211, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "name"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5238, -1, -1, -1, -1, -1, -1, -1,
+ // "names"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5265, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "namesp"
+ -1, 5292, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "namespa"
+ -1, -1, -1, 5319, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "namespac"
+ -1, -1, -1, -1, -1, 5346, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "namespace"
+ ContextualKeyword._namespace << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ne"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5400, -1, -1, -1,
+ // "new"
+ (tt._new << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "nu"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5454, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "nul"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5481, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "null"
+ (tt._null << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "o"
+ -1, -1, -1, -1, -1, -1, 5535, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5562, -1, -1, -1, -1, 5697, 5751, -1, -1, -1, -1,
+ // "of"
+ ContextualKeyword._of << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "op"
+ -1, 5589, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "opa"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5616, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "opaq"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5643, -1, -1, -1, -1, -1,
+ // "opaqu"
+ -1, -1, -1, -1, -1, 5670, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "opaque"
+ ContextualKeyword._opaque << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ou"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5724, -1, -1, -1, -1, -1, -1,
+ // "out"
+ ContextualKeyword._out << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ov"
+ -1, -1, -1, -1, -1, 5778, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ove"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5805, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "over"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5832, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "overr"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 5859, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "overri"
+ -1, -1, -1, -1, 5886, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "overrid"
+ -1, -1, -1, -1, -1, 5913, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "override"
+ ContextualKeyword._override << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "p"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5967, -1, -1, 6345, -1, -1, -1, -1, -1,
+ // "pr"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 5994, -1, -1, -1, -1, -1, 6129, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "pri"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6021, -1, -1, -1, -1,
+ // "priv"
+ -1, 6048, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "priva"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6075, -1, -1, -1, -1, -1, -1,
+ // "privat"
+ -1, -1, -1, -1, -1, 6102, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "private"
+ ContextualKeyword._private << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "pro"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6156, -1, -1, -1, -1, -1, -1,
+ // "prot"
+ -1, -1, -1, -1, -1, 6183, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6318, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "prote"
+ -1, -1, -1, 6210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "protec"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6237, -1, -1, -1, -1, -1, -1,
+ // "protect"
+ -1, -1, -1, -1, -1, 6264, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "protecte"
+ -1, -1, -1, -1, 6291, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "protected"
+ ContextualKeyword._protected << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "proto"
+ ContextualKeyword._proto << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "pu"
+ -1, -1, 6372, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "pub"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6399, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "publ"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 6426, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "publi"
+ -1, -1, -1, 6453, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "public"
+ ContextualKeyword._public << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "r"
+ -1, -1, -1, -1, -1, 6507, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "re"
+ -1, 6534, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6696, -1, -1, 6831, -1, -1, -1, -1, -1, -1,
+ // "rea"
+ -1, -1, -1, -1, 6561, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "read"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6588, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "reado"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6615, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "readon"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6642, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "readonl"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6669, -1,
+ // "readonly"
+ ContextualKeyword._readonly << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "req"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6723, -1, -1, -1, -1, -1,
+ // "requ"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 6750, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "requi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6777, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "requir"
+ -1, -1, -1, -1, -1, 6804, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "require"
+ ContextualKeyword._require << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ret"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6858, -1, -1, -1, -1, -1,
+ // "retu"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6885, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "retur"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6912, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "return"
+ (tt._return << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "s"
+ -1, 6966, -1, -1, -1, 7182, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7236, 7371, -1, 7479, -1, 7614, -1,
+ // "sa"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6993, -1, -1, -1, -1, -1, -1,
+ // "sat"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 7020, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "sati"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7047, -1, -1, -1, -1, -1, -1, -1,
+ // "satis"
+ -1, -1, -1, -1, -1, -1, 7074, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "satisf"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 7101, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "satisfi"
+ -1, -1, -1, -1, -1, 7128, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "satisfie"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7155, -1, -1, -1, -1, -1, -1, -1,
+ // "satisfies"
+ ContextualKeyword._satisfies << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "se"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7209, -1, -1, -1, -1, -1, -1,
+ // "set"
+ ContextualKeyword._set << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "st"
+ -1, 7263, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "sta"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7290, -1, -1, -1, -1, -1, -1,
+ // "stat"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 7317, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "stati"
+ -1, -1, -1, 7344, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "static"
+ ContextualKeyword._static << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "su"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7398, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "sup"
+ -1, -1, -1, -1, -1, 7425, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "supe"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7452, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "super"
+ (tt._super << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "sw"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 7506, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "swi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7533, -1, -1, -1, -1, -1, -1,
+ // "swit"
+ -1, -1, -1, 7560, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "switc"
+ -1, -1, -1, -1, -1, -1, -1, -1, 7587, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "switch"
+ (tt._switch << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "sy"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7641, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "sym"
+ -1, -1, 7668, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "symb"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7695, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "symbo"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7722, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "symbol"
+ ContextualKeyword._symbol << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "t"
+ -1, -1, -1, -1, -1, -1, -1, -1, 7776, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7938, -1, -1, -1, -1, -1, -1, 8046, -1,
+ // "th"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 7803, -1, -1, -1, -1, -1, -1, -1, -1, 7857, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "thi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7830, -1, -1, -1, -1, -1, -1, -1,
+ // "this"
+ (tt._this << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "thr"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7884, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "thro"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7911, -1, -1, -1,
+ // "throw"
+ (tt._throw << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "tr"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7965, -1, -1, -1, 8019, -1,
+ // "tru"
+ -1, -1, -1, -1, -1, 7992, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "true"
+ (tt._true << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "try"
+ (tt._try << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "ty"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8073, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "typ"
+ -1, -1, -1, -1, -1, 8100, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "type"
+ ContextualKeyword._type << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8127, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "typeo"
+ -1, -1, -1, -1, -1, -1, 8154, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "typeof"
+ (tt._typeof << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "u"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8208, -1, -1, -1, -1, 8343, -1, -1, -1, -1, -1, -1, -1,
+ // "un"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 8235, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "uni"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8262, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "uniq"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8289, -1, -1, -1, -1, -1,
+ // "uniqu"
+ -1, -1, -1, -1, -1, 8316, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "unique"
+ ContextualKeyword._unique << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "us"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 8370, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "usi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8397, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "usin"
+ -1, -1, -1, -1, -1, -1, -1, 8424, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "using"
+ ContextualKeyword._using << 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "v"
+ -1, 8478, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8532, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "va"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8505, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "var"
+ (tt._var << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "vo"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 8559, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "voi"
+ -1, -1, -1, -1, 8586, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "void"
+ (tt._void << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "w"
+ -1, -1, -1, -1, -1, -1, -1, -1, 8640, 8748, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "wh"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 8667, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "whi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8694, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "whil"
+ -1, -1, -1, -1, -1, 8721, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "while"
+ (tt._while << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "wi"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8775, -1, -1, -1, -1, -1, -1,
+ // "wit"
+ -1, -1, -1, -1, -1, -1, -1, -1, 8802, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "with"
+ (tt._with << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "y"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 8856, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "yi"
+ -1, -1, -1, -1, -1, 8883, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "yie"
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8910, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "yiel"
+ -1, -1, -1, -1, 8937, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // "yield"
+ (tt._yield << 1) + 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+]);
diff --git a/node_modules/sucrase/dist/esm/parser/tokenizer/state.js b/node_modules/sucrase/dist/esm/parser/tokenizer/state.js
new file mode 100644
index 0000000..940cde0
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/tokenizer/state.js
@@ -0,0 +1,106 @@
+
+import {ContextualKeyword} from "./keywords";
+import { TokenType as tt} from "./types";
+
+export class Scope {
+
+
+
+
+ constructor(startTokenIndex, endTokenIndex, isFunctionScope) {
+ this.startTokenIndex = startTokenIndex;
+ this.endTokenIndex = endTokenIndex;
+ this.isFunctionScope = isFunctionScope;
+ }
+}
+
+export class StateSnapshot {
+ constructor(
+ potentialArrowAt,
+ noAnonFunctionType,
+ inDisallowConditionalTypesContext,
+ tokensLength,
+ scopesLength,
+ pos,
+ type,
+ contextualKeyword,
+ start,
+ end,
+ isType,
+ scopeDepth,
+ error,
+ ) {;this.potentialArrowAt = potentialArrowAt;this.noAnonFunctionType = noAnonFunctionType;this.inDisallowConditionalTypesContext = inDisallowConditionalTypesContext;this.tokensLength = tokensLength;this.scopesLength = scopesLength;this.pos = pos;this.type = type;this.contextualKeyword = contextualKeyword;this.start = start;this.end = end;this.isType = isType;this.scopeDepth = scopeDepth;this.error = error;}
+}
+
+export default class State {constructor() { State.prototype.__init.call(this);State.prototype.__init2.call(this);State.prototype.__init3.call(this);State.prototype.__init4.call(this);State.prototype.__init5.call(this);State.prototype.__init6.call(this);State.prototype.__init7.call(this);State.prototype.__init8.call(this);State.prototype.__init9.call(this);State.prototype.__init10.call(this);State.prototype.__init11.call(this);State.prototype.__init12.call(this);State.prototype.__init13.call(this); }
+ // Used to signify the start of a potential arrow function
+ __init() {this.potentialArrowAt = -1}
+
+ // Used by Flow to handle an edge case involving function type parsing.
+ __init2() {this.noAnonFunctionType = false}
+
+ // Used by TypeScript to handle ambiguities when parsing conditional types.
+ __init3() {this.inDisallowConditionalTypesContext = false}
+
+ // Token store.
+ __init4() {this.tokens = []}
+
+ // Array of all observed scopes, ordered by their ending position.
+ __init5() {this.scopes = []}
+
+ // The current position of the tokenizer in the input.
+ __init6() {this.pos = 0}
+
+ // Information about the current token.
+ __init7() {this.type = tt.eof}
+ __init8() {this.contextualKeyword = ContextualKeyword.NONE}
+ __init9() {this.start = 0}
+ __init10() {this.end = 0}
+
+ __init11() {this.isType = false}
+ __init12() {this.scopeDepth = 0}
+
+ /**
+ * If the parser is in an error state, then the token is always tt.eof and all functions can
+ * keep executing but should be written so they don't get into an infinite loop in this situation.
+ *
+ * This approach, combined with the ability to snapshot and restore state, allows us to implement
+ * backtracking without exceptions and without needing to explicitly propagate error states
+ * everywhere.
+ */
+ __init13() {this.error = null}
+
+ snapshot() {
+ return new StateSnapshot(
+ this.potentialArrowAt,
+ this.noAnonFunctionType,
+ this.inDisallowConditionalTypesContext,
+ this.tokens.length,
+ this.scopes.length,
+ this.pos,
+ this.type,
+ this.contextualKeyword,
+ this.start,
+ this.end,
+ this.isType,
+ this.scopeDepth,
+ this.error,
+ );
+ }
+
+ restoreFromSnapshot(snapshot) {
+ this.potentialArrowAt = snapshot.potentialArrowAt;
+ this.noAnonFunctionType = snapshot.noAnonFunctionType;
+ this.inDisallowConditionalTypesContext = snapshot.inDisallowConditionalTypesContext;
+ this.tokens.length = snapshot.tokensLength;
+ this.scopes.length = snapshot.scopesLength;
+ this.pos = snapshot.pos;
+ this.type = snapshot.type;
+ this.contextualKeyword = snapshot.contextualKeyword;
+ this.start = snapshot.start;
+ this.end = snapshot.end;
+ this.isType = snapshot.isType;
+ this.scopeDepth = snapshot.scopeDepth;
+ this.error = snapshot.error;
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/parser/tokenizer/types.js b/node_modules/sucrase/dist/esm/parser/tokenizer/types.js
new file mode 100644
index 0000000..9746ad6
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/tokenizer/types.js
@@ -0,0 +1,361 @@
+// Generated file, do not edit! Run "yarn generate" to re-generate this file.
+/* istanbul ignore file */
+/**
+ * Enum of all token types, with bit fields to signify meaningful properties.
+ */
+export var TokenType; (function (TokenType) {
+ // Precedence 0 means not an operator; otherwise it is a positive number up to 12.
+ const PRECEDENCE_MASK = 0xf; TokenType[TokenType["PRECEDENCE_MASK"] = PRECEDENCE_MASK] = "PRECEDENCE_MASK";
+ const IS_KEYWORD = 1 << 4; TokenType[TokenType["IS_KEYWORD"] = IS_KEYWORD] = "IS_KEYWORD";
+ const IS_ASSIGN = 1 << 5; TokenType[TokenType["IS_ASSIGN"] = IS_ASSIGN] = "IS_ASSIGN";
+ const IS_RIGHT_ASSOCIATIVE = 1 << 6; TokenType[TokenType["IS_RIGHT_ASSOCIATIVE"] = IS_RIGHT_ASSOCIATIVE] = "IS_RIGHT_ASSOCIATIVE";
+ const IS_PREFIX = 1 << 7; TokenType[TokenType["IS_PREFIX"] = IS_PREFIX] = "IS_PREFIX";
+ const IS_POSTFIX = 1 << 8; TokenType[TokenType["IS_POSTFIX"] = IS_POSTFIX] = "IS_POSTFIX";
+ const IS_EXPRESSION_START = 1 << 9; TokenType[TokenType["IS_EXPRESSION_START"] = IS_EXPRESSION_START] = "IS_EXPRESSION_START";
+
+ const num = 512; TokenType[TokenType["num"] = num] = "num"; // num startsExpr
+ const bigint = 1536; TokenType[TokenType["bigint"] = bigint] = "bigint"; // bigint startsExpr
+ const decimal = 2560; TokenType[TokenType["decimal"] = decimal] = "decimal"; // decimal startsExpr
+ const regexp = 3584; TokenType[TokenType["regexp"] = regexp] = "regexp"; // regexp startsExpr
+ const string = 4608; TokenType[TokenType["string"] = string] = "string"; // string startsExpr
+ const name = 5632; TokenType[TokenType["name"] = name] = "name"; // name startsExpr
+ const eof = 6144; TokenType[TokenType["eof"] = eof] = "eof"; // eof
+ const bracketL = 7680; TokenType[TokenType["bracketL"] = bracketL] = "bracketL"; // [ startsExpr
+ const bracketR = 8192; TokenType[TokenType["bracketR"] = bracketR] = "bracketR"; // ]
+ const braceL = 9728; TokenType[TokenType["braceL"] = braceL] = "braceL"; // { startsExpr
+ const braceBarL = 10752; TokenType[TokenType["braceBarL"] = braceBarL] = "braceBarL"; // {| startsExpr
+ const braceR = 11264; TokenType[TokenType["braceR"] = braceR] = "braceR"; // }
+ const braceBarR = 12288; TokenType[TokenType["braceBarR"] = braceBarR] = "braceBarR"; // |}
+ const parenL = 13824; TokenType[TokenType["parenL"] = parenL] = "parenL"; // ( startsExpr
+ const parenR = 14336; TokenType[TokenType["parenR"] = parenR] = "parenR"; // )
+ const comma = 15360; TokenType[TokenType["comma"] = comma] = "comma"; // ,
+ const semi = 16384; TokenType[TokenType["semi"] = semi] = "semi"; // ;
+ const colon = 17408; TokenType[TokenType["colon"] = colon] = "colon"; // :
+ const doubleColon = 18432; TokenType[TokenType["doubleColon"] = doubleColon] = "doubleColon"; // ::
+ const dot = 19456; TokenType[TokenType["dot"] = dot] = "dot"; // .
+ const question = 20480; TokenType[TokenType["question"] = question] = "question"; // ?
+ const questionDot = 21504; TokenType[TokenType["questionDot"] = questionDot] = "questionDot"; // ?.
+ const arrow = 22528; TokenType[TokenType["arrow"] = arrow] = "arrow"; // =>
+ const template = 23552; TokenType[TokenType["template"] = template] = "template"; // template
+ const ellipsis = 24576; TokenType[TokenType["ellipsis"] = ellipsis] = "ellipsis"; // ...
+ const backQuote = 25600; TokenType[TokenType["backQuote"] = backQuote] = "backQuote"; // `
+ const dollarBraceL = 27136; TokenType[TokenType["dollarBraceL"] = dollarBraceL] = "dollarBraceL"; // ${ startsExpr
+ const at = 27648; TokenType[TokenType["at"] = at] = "at"; // @
+ const hash = 29184; TokenType[TokenType["hash"] = hash] = "hash"; // # startsExpr
+ const eq = 29728; TokenType[TokenType["eq"] = eq] = "eq"; // = isAssign
+ const assign = 30752; TokenType[TokenType["assign"] = assign] = "assign"; // _= isAssign
+ const preIncDec = 32640; TokenType[TokenType["preIncDec"] = preIncDec] = "preIncDec"; // ++/-- prefix postfix startsExpr
+ const postIncDec = 33664; TokenType[TokenType["postIncDec"] = postIncDec] = "postIncDec"; // ++/-- prefix postfix startsExpr
+ const bang = 34432; TokenType[TokenType["bang"] = bang] = "bang"; // ! prefix startsExpr
+ const tilde = 35456; TokenType[TokenType["tilde"] = tilde] = "tilde"; // ~ prefix startsExpr
+ const pipeline = 35841; TokenType[TokenType["pipeline"] = pipeline] = "pipeline"; // |> prec:1
+ const nullishCoalescing = 36866; TokenType[TokenType["nullishCoalescing"] = nullishCoalescing] = "nullishCoalescing"; // ?? prec:2
+ const logicalOR = 37890; TokenType[TokenType["logicalOR"] = logicalOR] = "logicalOR"; // || prec:2
+ const logicalAND = 38915; TokenType[TokenType["logicalAND"] = logicalAND] = "logicalAND"; // && prec:3
+ const bitwiseOR = 39940; TokenType[TokenType["bitwiseOR"] = bitwiseOR] = "bitwiseOR"; // | prec:4
+ const bitwiseXOR = 40965; TokenType[TokenType["bitwiseXOR"] = bitwiseXOR] = "bitwiseXOR"; // ^ prec:5
+ const bitwiseAND = 41990; TokenType[TokenType["bitwiseAND"] = bitwiseAND] = "bitwiseAND"; // & prec:6
+ const equality = 43015; TokenType[TokenType["equality"] = equality] = "equality"; // ==/!= prec:7
+ const lessThan = 44040; TokenType[TokenType["lessThan"] = lessThan] = "lessThan"; // < prec:8
+ const greaterThan = 45064; TokenType[TokenType["greaterThan"] = greaterThan] = "greaterThan"; // > prec:8
+ const relationalOrEqual = 46088; TokenType[TokenType["relationalOrEqual"] = relationalOrEqual] = "relationalOrEqual"; // <=/>= prec:8
+ const bitShiftL = 47113; TokenType[TokenType["bitShiftL"] = bitShiftL] = "bitShiftL"; // << prec:9
+ const bitShiftR = 48137; TokenType[TokenType["bitShiftR"] = bitShiftR] = "bitShiftR"; // >>/>>> prec:9
+ const plus = 49802; TokenType[TokenType["plus"] = plus] = "plus"; // + prec:10 prefix startsExpr
+ const minus = 50826; TokenType[TokenType["minus"] = minus] = "minus"; // - prec:10 prefix startsExpr
+ const modulo = 51723; TokenType[TokenType["modulo"] = modulo] = "modulo"; // % prec:11 startsExpr
+ const star = 52235; TokenType[TokenType["star"] = star] = "star"; // * prec:11
+ const slash = 53259; TokenType[TokenType["slash"] = slash] = "slash"; // / prec:11
+ const exponent = 54348; TokenType[TokenType["exponent"] = exponent] = "exponent"; // ** prec:12 rightAssociative
+ const jsxName = 55296; TokenType[TokenType["jsxName"] = jsxName] = "jsxName"; // jsxName
+ const jsxText = 56320; TokenType[TokenType["jsxText"] = jsxText] = "jsxText"; // jsxText
+ const jsxEmptyText = 57344; TokenType[TokenType["jsxEmptyText"] = jsxEmptyText] = "jsxEmptyText"; // jsxEmptyText
+ const jsxTagStart = 58880; TokenType[TokenType["jsxTagStart"] = jsxTagStart] = "jsxTagStart"; // jsxTagStart startsExpr
+ const jsxTagEnd = 59392; TokenType[TokenType["jsxTagEnd"] = jsxTagEnd] = "jsxTagEnd"; // jsxTagEnd
+ const typeParameterStart = 60928; TokenType[TokenType["typeParameterStart"] = typeParameterStart] = "typeParameterStart"; // typeParameterStart startsExpr
+ const nonNullAssertion = 61440; TokenType[TokenType["nonNullAssertion"] = nonNullAssertion] = "nonNullAssertion"; // nonNullAssertion
+ const _break = 62480; TokenType[TokenType["_break"] = _break] = "_break"; // break keyword
+ const _case = 63504; TokenType[TokenType["_case"] = _case] = "_case"; // case keyword
+ const _catch = 64528; TokenType[TokenType["_catch"] = _catch] = "_catch"; // catch keyword
+ const _continue = 65552; TokenType[TokenType["_continue"] = _continue] = "_continue"; // continue keyword
+ const _debugger = 66576; TokenType[TokenType["_debugger"] = _debugger] = "_debugger"; // debugger keyword
+ const _default = 67600; TokenType[TokenType["_default"] = _default] = "_default"; // default keyword
+ const _do = 68624; TokenType[TokenType["_do"] = _do] = "_do"; // do keyword
+ const _else = 69648; TokenType[TokenType["_else"] = _else] = "_else"; // else keyword
+ const _finally = 70672; TokenType[TokenType["_finally"] = _finally] = "_finally"; // finally keyword
+ const _for = 71696; TokenType[TokenType["_for"] = _for] = "_for"; // for keyword
+ const _function = 73232; TokenType[TokenType["_function"] = _function] = "_function"; // function keyword startsExpr
+ const _if = 73744; TokenType[TokenType["_if"] = _if] = "_if"; // if keyword
+ const _return = 74768; TokenType[TokenType["_return"] = _return] = "_return"; // return keyword
+ const _switch = 75792; TokenType[TokenType["_switch"] = _switch] = "_switch"; // switch keyword
+ const _throw = 77456; TokenType[TokenType["_throw"] = _throw] = "_throw"; // throw keyword prefix startsExpr
+ const _try = 77840; TokenType[TokenType["_try"] = _try] = "_try"; // try keyword
+ const _var = 78864; TokenType[TokenType["_var"] = _var] = "_var"; // var keyword
+ const _let = 79888; TokenType[TokenType["_let"] = _let] = "_let"; // let keyword
+ const _const = 80912; TokenType[TokenType["_const"] = _const] = "_const"; // const keyword
+ const _while = 81936; TokenType[TokenType["_while"] = _while] = "_while"; // while keyword
+ const _with = 82960; TokenType[TokenType["_with"] = _with] = "_with"; // with keyword
+ const _new = 84496; TokenType[TokenType["_new"] = _new] = "_new"; // new keyword startsExpr
+ const _this = 85520; TokenType[TokenType["_this"] = _this] = "_this"; // this keyword startsExpr
+ const _super = 86544; TokenType[TokenType["_super"] = _super] = "_super"; // super keyword startsExpr
+ const _class = 87568; TokenType[TokenType["_class"] = _class] = "_class"; // class keyword startsExpr
+ const _extends = 88080; TokenType[TokenType["_extends"] = _extends] = "_extends"; // extends keyword
+ const _export = 89104; TokenType[TokenType["_export"] = _export] = "_export"; // export keyword
+ const _import = 90640; TokenType[TokenType["_import"] = _import] = "_import"; // import keyword startsExpr
+ const _yield = 91664; TokenType[TokenType["_yield"] = _yield] = "_yield"; // yield keyword startsExpr
+ const _null = 92688; TokenType[TokenType["_null"] = _null] = "_null"; // null keyword startsExpr
+ const _true = 93712; TokenType[TokenType["_true"] = _true] = "_true"; // true keyword startsExpr
+ const _false = 94736; TokenType[TokenType["_false"] = _false] = "_false"; // false keyword startsExpr
+ const _in = 95256; TokenType[TokenType["_in"] = _in] = "_in"; // in prec:8 keyword
+ const _instanceof = 96280; TokenType[TokenType["_instanceof"] = _instanceof] = "_instanceof"; // instanceof prec:8 keyword
+ const _typeof = 97936; TokenType[TokenType["_typeof"] = _typeof] = "_typeof"; // typeof keyword prefix startsExpr
+ const _void = 98960; TokenType[TokenType["_void"] = _void] = "_void"; // void keyword prefix startsExpr
+ const _delete = 99984; TokenType[TokenType["_delete"] = _delete] = "_delete"; // delete keyword prefix startsExpr
+ const _async = 100880; TokenType[TokenType["_async"] = _async] = "_async"; // async keyword startsExpr
+ const _get = 101904; TokenType[TokenType["_get"] = _get] = "_get"; // get keyword startsExpr
+ const _set = 102928; TokenType[TokenType["_set"] = _set] = "_set"; // set keyword startsExpr
+ const _declare = 103952; TokenType[TokenType["_declare"] = _declare] = "_declare"; // declare keyword startsExpr
+ const _readonly = 104976; TokenType[TokenType["_readonly"] = _readonly] = "_readonly"; // readonly keyword startsExpr
+ const _abstract = 106000; TokenType[TokenType["_abstract"] = _abstract] = "_abstract"; // abstract keyword startsExpr
+ const _static = 107024; TokenType[TokenType["_static"] = _static] = "_static"; // static keyword startsExpr
+ const _public = 107536; TokenType[TokenType["_public"] = _public] = "_public"; // public keyword
+ const _private = 108560; TokenType[TokenType["_private"] = _private] = "_private"; // private keyword
+ const _protected = 109584; TokenType[TokenType["_protected"] = _protected] = "_protected"; // protected keyword
+ const _override = 110608; TokenType[TokenType["_override"] = _override] = "_override"; // override keyword
+ const _as = 112144; TokenType[TokenType["_as"] = _as] = "_as"; // as keyword startsExpr
+ const _enum = 113168; TokenType[TokenType["_enum"] = _enum] = "_enum"; // enum keyword startsExpr
+ const _type = 114192; TokenType[TokenType["_type"] = _type] = "_type"; // type keyword startsExpr
+ const _implements = 115216; TokenType[TokenType["_implements"] = _implements] = "_implements"; // implements keyword startsExpr
+})(TokenType || (TokenType = {}));
+export function formatTokenType(tokenType) {
+ switch (tokenType) {
+ case TokenType.num:
+ return "num";
+ case TokenType.bigint:
+ return "bigint";
+ case TokenType.decimal:
+ return "decimal";
+ case TokenType.regexp:
+ return "regexp";
+ case TokenType.string:
+ return "string";
+ case TokenType.name:
+ return "name";
+ case TokenType.eof:
+ return "eof";
+ case TokenType.bracketL:
+ return "[";
+ case TokenType.bracketR:
+ return "]";
+ case TokenType.braceL:
+ return "{";
+ case TokenType.braceBarL:
+ return "{|";
+ case TokenType.braceR:
+ return "}";
+ case TokenType.braceBarR:
+ return "|}";
+ case TokenType.parenL:
+ return "(";
+ case TokenType.parenR:
+ return ")";
+ case TokenType.comma:
+ return ",";
+ case TokenType.semi:
+ return ";";
+ case TokenType.colon:
+ return ":";
+ case TokenType.doubleColon:
+ return "::";
+ case TokenType.dot:
+ return ".";
+ case TokenType.question:
+ return "?";
+ case TokenType.questionDot:
+ return "?.";
+ case TokenType.arrow:
+ return "=>";
+ case TokenType.template:
+ return "template";
+ case TokenType.ellipsis:
+ return "...";
+ case TokenType.backQuote:
+ return "`";
+ case TokenType.dollarBraceL:
+ return "${";
+ case TokenType.at:
+ return "@";
+ case TokenType.hash:
+ return "#";
+ case TokenType.eq:
+ return "=";
+ case TokenType.assign:
+ return "_=";
+ case TokenType.preIncDec:
+ return "++/--";
+ case TokenType.postIncDec:
+ return "++/--";
+ case TokenType.bang:
+ return "!";
+ case TokenType.tilde:
+ return "~";
+ case TokenType.pipeline:
+ return "|>";
+ case TokenType.nullishCoalescing:
+ return "??";
+ case TokenType.logicalOR:
+ return "||";
+ case TokenType.logicalAND:
+ return "&&";
+ case TokenType.bitwiseOR:
+ return "|";
+ case TokenType.bitwiseXOR:
+ return "^";
+ case TokenType.bitwiseAND:
+ return "&";
+ case TokenType.equality:
+ return "==/!=";
+ case TokenType.lessThan:
+ return "<";
+ case TokenType.greaterThan:
+ return ">";
+ case TokenType.relationalOrEqual:
+ return "<=/>=";
+ case TokenType.bitShiftL:
+ return "<<";
+ case TokenType.bitShiftR:
+ return ">>/>>>";
+ case TokenType.plus:
+ return "+";
+ case TokenType.minus:
+ return "-";
+ case TokenType.modulo:
+ return "%";
+ case TokenType.star:
+ return "*";
+ case TokenType.slash:
+ return "/";
+ case TokenType.exponent:
+ return "**";
+ case TokenType.jsxName:
+ return "jsxName";
+ case TokenType.jsxText:
+ return "jsxText";
+ case TokenType.jsxEmptyText:
+ return "jsxEmptyText";
+ case TokenType.jsxTagStart:
+ return "jsxTagStart";
+ case TokenType.jsxTagEnd:
+ return "jsxTagEnd";
+ case TokenType.typeParameterStart:
+ return "typeParameterStart";
+ case TokenType.nonNullAssertion:
+ return "nonNullAssertion";
+ case TokenType._break:
+ return "break";
+ case TokenType._case:
+ return "case";
+ case TokenType._catch:
+ return "catch";
+ case TokenType._continue:
+ return "continue";
+ case TokenType._debugger:
+ return "debugger";
+ case TokenType._default:
+ return "default";
+ case TokenType._do:
+ return "do";
+ case TokenType._else:
+ return "else";
+ case TokenType._finally:
+ return "finally";
+ case TokenType._for:
+ return "for";
+ case TokenType._function:
+ return "function";
+ case TokenType._if:
+ return "if";
+ case TokenType._return:
+ return "return";
+ case TokenType._switch:
+ return "switch";
+ case TokenType._throw:
+ return "throw";
+ case TokenType._try:
+ return "try";
+ case TokenType._var:
+ return "var";
+ case TokenType._let:
+ return "let";
+ case TokenType._const:
+ return "const";
+ case TokenType._while:
+ return "while";
+ case TokenType._with:
+ return "with";
+ case TokenType._new:
+ return "new";
+ case TokenType._this:
+ return "this";
+ case TokenType._super:
+ return "super";
+ case TokenType._class:
+ return "class";
+ case TokenType._extends:
+ return "extends";
+ case TokenType._export:
+ return "export";
+ case TokenType._import:
+ return "import";
+ case TokenType._yield:
+ return "yield";
+ case TokenType._null:
+ return "null";
+ case TokenType._true:
+ return "true";
+ case TokenType._false:
+ return "false";
+ case TokenType._in:
+ return "in";
+ case TokenType._instanceof:
+ return "instanceof";
+ case TokenType._typeof:
+ return "typeof";
+ case TokenType._void:
+ return "void";
+ case TokenType._delete:
+ return "delete";
+ case TokenType._async:
+ return "async";
+ case TokenType._get:
+ return "get";
+ case TokenType._set:
+ return "set";
+ case TokenType._declare:
+ return "declare";
+ case TokenType._readonly:
+ return "readonly";
+ case TokenType._abstract:
+ return "abstract";
+ case TokenType._static:
+ return "static";
+ case TokenType._public:
+ return "public";
+ case TokenType._private:
+ return "private";
+ case TokenType._protected:
+ return "protected";
+ case TokenType._override:
+ return "override";
+ case TokenType._as:
+ return "as";
+ case TokenType._enum:
+ return "enum";
+ case TokenType._type:
+ return "type";
+ case TokenType._implements:
+ return "implements";
+ default:
+ return "";
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/parser/traverser/base.js b/node_modules/sucrase/dist/esm/parser/traverser/base.js
new file mode 100644
index 0000000..df24ff7
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/traverser/base.js
@@ -0,0 +1,60 @@
+import State from "../tokenizer/state";
+import {charCodes} from "../util/charcodes";
+
+export let isJSXEnabled;
+export let isTypeScriptEnabled;
+export let isFlowEnabled;
+export let state;
+export let input;
+export let nextContextId;
+
+export function getNextContextId() {
+ return nextContextId++;
+}
+
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+export function augmentError(error) {
+ if ("pos" in error) {
+ const loc = locationForIndex(error.pos);
+ error.message += ` (${loc.line}:${loc.column})`;
+ error.loc = loc;
+ }
+ return error;
+}
+
+export class Loc {
+
+
+ constructor(line, column) {
+ this.line = line;
+ this.column = column;
+ }
+}
+
+export function locationForIndex(pos) {
+ let line = 1;
+ let column = 1;
+ for (let i = 0; i < pos; i++) {
+ if (input.charCodeAt(i) === charCodes.lineFeed) {
+ line++;
+ column = 1;
+ } else {
+ column++;
+ }
+ }
+ return new Loc(line, column);
+}
+
+export function initParser(
+ inputCode,
+ isJSXEnabledArg,
+ isTypeScriptEnabledArg,
+ isFlowEnabledArg,
+) {
+ input = inputCode;
+ state = new State();
+ nextContextId = 1;
+ isJSXEnabled = isJSXEnabledArg;
+ isTypeScriptEnabled = isTypeScriptEnabledArg;
+ isFlowEnabled = isFlowEnabledArg;
+}
diff --git a/node_modules/sucrase/dist/esm/parser/traverser/expression.js b/node_modules/sucrase/dist/esm/parser/traverser/expression.js
new file mode 100644
index 0000000..aa6717f
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/traverser/expression.js
@@ -0,0 +1,1022 @@
+/* eslint max-len: 0 */
+
+// A recursive descent parser operates by defining functions for all
+// syntactic elements, and recursively calling those, each function
+// advancing the input stream and returning an AST node. Precedence
+// of constructs (for example, the fact that `!x[1]` means `!(x[1])`
+// instead of `(!x)[1]` is handled by the fact that the parser
+// function that parses unary prefix operators is called first, and
+// in turn calls the function that parses `[]` subscripts — that
+// way, it'll receive the node for `x[1]` already parsed, and wraps
+// *that* in the unary operator node.
+//
+// Acorn uses an [operator precedence parser][opp] to handle binary
+// operator precedence, because it is much more compact than using
+// the technique outlined above, which uses different, nesting
+// functions to specify precedence, for all of the ten binary
+// precedence levels that JavaScript defines.
+//
+// [opp]: http://en.wikipedia.org/wiki/Operator-precedence_parser
+
+import {
+ flowParseArrow,
+ flowParseFunctionBodyAndFinish,
+ flowParseMaybeAssign,
+ flowParseSubscript,
+ flowParseSubscripts,
+ flowParseVariance,
+ flowStartParseAsyncArrowFromCallExpression,
+ flowStartParseNewArguments,
+ flowStartParseObjPropValue,
+} from "../plugins/flow";
+import {jsxParseElement} from "../plugins/jsx/index";
+import {typedParseConditional, typedParseParenItem} from "../plugins/types";
+import {
+ tsParseArrow,
+ tsParseFunctionBodyAndFinish,
+ tsParseMaybeAssign,
+ tsParseSubscript,
+ tsParseType,
+ tsParseTypeAssertion,
+ tsStartParseAsyncArrowFromCallExpression,
+ tsStartParseObjPropValue,
+} from "../plugins/typescript";
+import {
+ eat,
+ IdentifierRole,
+ lookaheadCharCode,
+ lookaheadType,
+ match,
+ next,
+ nextTemplateToken,
+ popTypeContext,
+ pushTypeContext,
+ rescan_gt,
+ retokenizeSlashAsRegex,
+} from "../tokenizer/index";
+import {ContextualKeyword} from "../tokenizer/keywords";
+import {Scope} from "../tokenizer/state";
+import {TokenType, TokenType as tt} from "../tokenizer/types";
+import {charCodes} from "../util/charcodes";
+import {IS_IDENTIFIER_START} from "../util/identifier";
+import {getNextContextId, isFlowEnabled, isJSXEnabled, isTypeScriptEnabled, state} from "./base";
+import {
+ markPriorBindingIdentifier,
+ parseBindingIdentifier,
+ parseMaybeDefault,
+ parseRest,
+ parseSpread,
+} from "./lval";
+import {
+ parseBlock,
+ parseBlockBody,
+ parseClass,
+ parseDecorators,
+ parseFunction,
+ parseFunctionParams,
+} from "./statement";
+import {
+ canInsertSemicolon,
+ eatContextual,
+ expect,
+ expectContextual,
+ hasFollowingLineBreak,
+ hasPrecedingLineBreak,
+ isContextual,
+ unexpected,
+} from "./util";
+
+export class StopState {
+
+ constructor(stop) {
+ this.stop = stop;
+ }
+}
+
+// ### Expression parsing
+
+// These nest, from the most general expression type at the top to
+// 'atomic', nondivisible expression types at the bottom. Most of
+// the functions will simply let the function (s) below them parse,
+// and, *if* the syntactic construct they handle is present, wrap
+// the AST node that the inner parser gave them in another node.
+export function parseExpression(noIn = false) {
+ parseMaybeAssign(noIn);
+ if (match(tt.comma)) {
+ while (eat(tt.comma)) {
+ parseMaybeAssign(noIn);
+ }
+ }
+}
+
+/**
+ * noIn is used when parsing a for loop so that we don't interpret a following "in" as the binary
+ * operator.
+ * isWithinParens is used to indicate that we're parsing something that might be a comma expression
+ * or might be an arrow function or might be a Flow type assertion (which requires explicit parens).
+ * In these cases, we should allow : and ?: after the initial "left" part.
+ */
+export function parseMaybeAssign(noIn = false, isWithinParens = false) {
+ if (isTypeScriptEnabled) {
+ return tsParseMaybeAssign(noIn, isWithinParens);
+ } else if (isFlowEnabled) {
+ return flowParseMaybeAssign(noIn, isWithinParens);
+ } else {
+ return baseParseMaybeAssign(noIn, isWithinParens);
+ }
+}
+
+// Parse an assignment expression. This includes applications of
+// operators like `+=`.
+// Returns true if the expression was an arrow function.
+export function baseParseMaybeAssign(noIn, isWithinParens) {
+ if (match(tt._yield)) {
+ parseYield();
+ return false;
+ }
+
+ if (match(tt.parenL) || match(tt.name) || match(tt._yield)) {
+ state.potentialArrowAt = state.start;
+ }
+
+ const wasArrow = parseMaybeConditional(noIn);
+ if (isWithinParens) {
+ parseParenItem();
+ }
+ if (state.type & TokenType.IS_ASSIGN) {
+ next();
+ parseMaybeAssign(noIn);
+ return false;
+ }
+ return wasArrow;
+}
+
+// Parse a ternary conditional (`?:`) operator.
+// Returns true if the expression was an arrow function.
+function parseMaybeConditional(noIn) {
+ const wasArrow = parseExprOps(noIn);
+ if (wasArrow) {
+ return true;
+ }
+ parseConditional(noIn);
+ return false;
+}
+
+function parseConditional(noIn) {
+ if (isTypeScriptEnabled || isFlowEnabled) {
+ typedParseConditional(noIn);
+ } else {
+ baseParseConditional(noIn);
+ }
+}
+
+export function baseParseConditional(noIn) {
+ if (eat(tt.question)) {
+ parseMaybeAssign();
+ expect(tt.colon);
+ parseMaybeAssign(noIn);
+ }
+}
+
+// Start the precedence parser.
+// Returns true if this was an arrow function
+function parseExprOps(noIn) {
+ const startTokenIndex = state.tokens.length;
+ const wasArrow = parseMaybeUnary();
+ if (wasArrow) {
+ return true;
+ }
+ parseExprOp(startTokenIndex, -1, noIn);
+ return false;
+}
+
+// Parse binary operators with the operator precedence parsing
+// algorithm. `left` is the left-hand side of the operator.
+// `minPrec` provides context that allows the function to stop and
+// defer further parser to one of its callers when it encounters an
+// operator that has a lower precedence than the set it is parsing.
+function parseExprOp(startTokenIndex, minPrec, noIn) {
+ if (
+ isTypeScriptEnabled &&
+ (tt._in & TokenType.PRECEDENCE_MASK) > minPrec &&
+ !hasPrecedingLineBreak() &&
+ (eatContextual(ContextualKeyword._as) || eatContextual(ContextualKeyword._satisfies))
+ ) {
+ const oldIsType = pushTypeContext(1);
+ tsParseType();
+ popTypeContext(oldIsType);
+ rescan_gt();
+ parseExprOp(startTokenIndex, minPrec, noIn);
+ return;
+ }
+
+ const prec = state.type & TokenType.PRECEDENCE_MASK;
+ if (prec > 0 && (!noIn || !match(tt._in))) {
+ if (prec > minPrec) {
+ const op = state.type;
+ next();
+ if (op === tt.nullishCoalescing) {
+ state.tokens[state.tokens.length - 1].nullishStartIndex = startTokenIndex;
+ }
+
+ const rhsStartTokenIndex = state.tokens.length;
+ parseMaybeUnary();
+ // Extend the right operand of this operator if possible.
+ parseExprOp(rhsStartTokenIndex, op & TokenType.IS_RIGHT_ASSOCIATIVE ? prec - 1 : prec, noIn);
+ if (op === tt.nullishCoalescing) {
+ state.tokens[startTokenIndex].numNullishCoalesceStarts++;
+ state.tokens[state.tokens.length - 1].numNullishCoalesceEnds++;
+ }
+ // Continue with any future operator holding this expression as the left operand.
+ parseExprOp(startTokenIndex, minPrec, noIn);
+ }
+ }
+}
+
+// Parse unary operators, both prefix and postfix.
+// Returns true if this was an arrow function.
+export function parseMaybeUnary() {
+ if (isTypeScriptEnabled && !isJSXEnabled && eat(tt.lessThan)) {
+ tsParseTypeAssertion();
+ return false;
+ }
+ if (
+ isContextual(ContextualKeyword._module) &&
+ lookaheadCharCode() === charCodes.leftCurlyBrace &&
+ !hasFollowingLineBreak()
+ ) {
+ parseModuleExpression();
+ return false;
+ }
+ if (state.type & TokenType.IS_PREFIX) {
+ next();
+ parseMaybeUnary();
+ return false;
+ }
+
+ const wasArrow = parseExprSubscripts();
+ if (wasArrow) {
+ return true;
+ }
+ while (state.type & TokenType.IS_POSTFIX && !canInsertSemicolon()) {
+ // The tokenizer calls everything a preincrement, so make it a postincrement when
+ // we see it in that context.
+ if (state.type === tt.preIncDec) {
+ state.type = tt.postIncDec;
+ }
+ next();
+ }
+ return false;
+}
+
+// Parse call, dot, and `[]`-subscript expressions.
+// Returns true if this was an arrow function.
+export function parseExprSubscripts() {
+ const startTokenIndex = state.tokens.length;
+ const wasArrow = parseExprAtom();
+ if (wasArrow) {
+ return true;
+ }
+ parseSubscripts(startTokenIndex);
+ // If there was any optional chain operation, the start token would be marked
+ // as such, so also mark the end now.
+ if (state.tokens.length > startTokenIndex && state.tokens[startTokenIndex].isOptionalChainStart) {
+ state.tokens[state.tokens.length - 1].isOptionalChainEnd = true;
+ }
+ return false;
+}
+
+function parseSubscripts(startTokenIndex, noCalls = false) {
+ if (isFlowEnabled) {
+ flowParseSubscripts(startTokenIndex, noCalls);
+ } else {
+ baseParseSubscripts(startTokenIndex, noCalls);
+ }
+}
+
+export function baseParseSubscripts(startTokenIndex, noCalls = false) {
+ const stopState = new StopState(false);
+ do {
+ parseSubscript(startTokenIndex, noCalls, stopState);
+ } while (!stopState.stop && !state.error);
+}
+
+function parseSubscript(startTokenIndex, noCalls, stopState) {
+ if (isTypeScriptEnabled) {
+ tsParseSubscript(startTokenIndex, noCalls, stopState);
+ } else if (isFlowEnabled) {
+ flowParseSubscript(startTokenIndex, noCalls, stopState);
+ } else {
+ baseParseSubscript(startTokenIndex, noCalls, stopState);
+ }
+}
+
+/** Set 'state.stop = true' to indicate that we should stop parsing subscripts. */
+export function baseParseSubscript(
+ startTokenIndex,
+ noCalls,
+ stopState,
+) {
+ if (!noCalls && eat(tt.doubleColon)) {
+ parseNoCallExpr();
+ stopState.stop = true;
+ // Propagate startTokenIndex so that `a::b?.()` will keep `a` as the first token. We may want
+ // to revisit this in the future when fully supporting bind syntax.
+ parseSubscripts(startTokenIndex, noCalls);
+ } else if (match(tt.questionDot)) {
+ state.tokens[startTokenIndex].isOptionalChainStart = true;
+ if (noCalls && lookaheadType() === tt.parenL) {
+ stopState.stop = true;
+ return;
+ }
+ next();
+ state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
+
+ if (eat(tt.bracketL)) {
+ parseExpression();
+ expect(tt.bracketR);
+ } else if (eat(tt.parenL)) {
+ parseCallExpressionArguments();
+ } else {
+ parseMaybePrivateName();
+ }
+ } else if (eat(tt.dot)) {
+ state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
+ parseMaybePrivateName();
+ } else if (eat(tt.bracketL)) {
+ state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
+ parseExpression();
+ expect(tt.bracketR);
+ } else if (!noCalls && match(tt.parenL)) {
+ if (atPossibleAsync()) {
+ // We see "async", but it's possible it's a usage of the name "async". Parse as if it's a
+ // function call, and if we see an arrow later, backtrack and re-parse as a parameter list.
+ const snapshot = state.snapshot();
+ const asyncStartTokenIndex = state.tokens.length;
+ next();
+ state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
+
+ const callContextId = getNextContextId();
+
+ state.tokens[state.tokens.length - 1].contextId = callContextId;
+ parseCallExpressionArguments();
+ state.tokens[state.tokens.length - 1].contextId = callContextId;
+
+ if (shouldParseAsyncArrow()) {
+ // We hit an arrow, so backtrack and start again parsing function parameters.
+ state.restoreFromSnapshot(snapshot);
+ stopState.stop = true;
+ state.scopeDepth++;
+
+ parseFunctionParams();
+ parseAsyncArrowFromCallExpression(asyncStartTokenIndex);
+ }
+ } else {
+ next();
+ state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
+ const callContextId = getNextContextId();
+ state.tokens[state.tokens.length - 1].contextId = callContextId;
+ parseCallExpressionArguments();
+ state.tokens[state.tokens.length - 1].contextId = callContextId;
+ }
+ } else if (match(tt.backQuote)) {
+ // Tagged template expression.
+ parseTemplate();
+ } else {
+ stopState.stop = true;
+ }
+}
+
+export function atPossibleAsync() {
+ // This was made less strict than the original version to avoid passing around nodes, but it
+ // should be safe to have rare false positives here.
+ return (
+ state.tokens[state.tokens.length - 1].contextualKeyword === ContextualKeyword._async &&
+ !canInsertSemicolon()
+ );
+}
+
+export function parseCallExpressionArguments() {
+ let first = true;
+ while (!eat(tt.parenR) && !state.error) {
+ if (first) {
+ first = false;
+ } else {
+ expect(tt.comma);
+ if (eat(tt.parenR)) {
+ break;
+ }
+ }
+
+ parseExprListItem(false);
+ }
+}
+
+function shouldParseAsyncArrow() {
+ return match(tt.colon) || match(tt.arrow);
+}
+
+function parseAsyncArrowFromCallExpression(startTokenIndex) {
+ if (isTypeScriptEnabled) {
+ tsStartParseAsyncArrowFromCallExpression();
+ } else if (isFlowEnabled) {
+ flowStartParseAsyncArrowFromCallExpression();
+ }
+ expect(tt.arrow);
+ parseArrowExpression(startTokenIndex);
+}
+
+// Parse a no-call expression (like argument of `new` or `::` operators).
+
+function parseNoCallExpr() {
+ const startTokenIndex = state.tokens.length;
+ parseExprAtom();
+ parseSubscripts(startTokenIndex, true);
+}
+
+// Parse an atomic expression — either a single token that is an
+// expression, an expression started by a keyword like `function` or
+// `new`, or an expression wrapped in punctuation like `()`, `[]`,
+// or `{}`.
+// Returns true if the parsed expression was an arrow function.
+export function parseExprAtom() {
+ if (eat(tt.modulo)) {
+ // V8 intrinsic expression. Just parse the identifier, and the function invocation is parsed
+ // naturally.
+ parseIdentifier();
+ return false;
+ }
+
+ if (match(tt.jsxText) || match(tt.jsxEmptyText)) {
+ parseLiteral();
+ return false;
+ } else if (match(tt.lessThan) && isJSXEnabled) {
+ state.type = tt.jsxTagStart;
+ jsxParseElement();
+ next();
+ return false;
+ }
+
+ const canBeArrow = state.potentialArrowAt === state.start;
+ switch (state.type) {
+ case tt.slash:
+ case tt.assign:
+ retokenizeSlashAsRegex();
+ // Fall through.
+
+ case tt._super:
+ case tt._this:
+ case tt.regexp:
+ case tt.num:
+ case tt.bigint:
+ case tt.decimal:
+ case tt.string:
+ case tt._null:
+ case tt._true:
+ case tt._false:
+ next();
+ return false;
+
+ case tt._import:
+ next();
+ if (match(tt.dot)) {
+ // import.meta
+ state.tokens[state.tokens.length - 1].type = tt.name;
+ next();
+ parseIdentifier();
+ }
+ return false;
+
+ case tt.name: {
+ const startTokenIndex = state.tokens.length;
+ const functionStart = state.start;
+ const contextualKeyword = state.contextualKeyword;
+ parseIdentifier();
+ if (contextualKeyword === ContextualKeyword._await) {
+ parseAwait();
+ return false;
+ } else if (
+ contextualKeyword === ContextualKeyword._async &&
+ match(tt._function) &&
+ !canInsertSemicolon()
+ ) {
+ next();
+ parseFunction(functionStart, false);
+ return false;
+ } else if (
+ canBeArrow &&
+ contextualKeyword === ContextualKeyword._async &&
+ !canInsertSemicolon() &&
+ match(tt.name)
+ ) {
+ state.scopeDepth++;
+ parseBindingIdentifier(false);
+ expect(tt.arrow);
+ // let foo = async bar => {};
+ parseArrowExpression(startTokenIndex);
+ return true;
+ } else if (match(tt._do) && !canInsertSemicolon()) {
+ next();
+ parseBlock();
+ return false;
+ }
+
+ if (canBeArrow && !canInsertSemicolon() && match(tt.arrow)) {
+ state.scopeDepth++;
+ markPriorBindingIdentifier(false);
+ expect(tt.arrow);
+ parseArrowExpression(startTokenIndex);
+ return true;
+ }
+
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.Access;
+ return false;
+ }
+
+ case tt._do: {
+ next();
+ parseBlock();
+ return false;
+ }
+
+ case tt.parenL: {
+ const wasArrow = parseParenAndDistinguishExpression(canBeArrow);
+ return wasArrow;
+ }
+
+ case tt.bracketL:
+ next();
+ parseExprList(tt.bracketR, true);
+ return false;
+
+ case tt.braceL:
+ parseObj(false, false);
+ return false;
+
+ case tt._function:
+ parseFunctionExpression();
+ return false;
+
+ case tt.at:
+ parseDecorators();
+ // Fall through.
+
+ case tt._class:
+ parseClass(false);
+ return false;
+
+ case tt._new:
+ parseNew();
+ return false;
+
+ case tt.backQuote:
+ parseTemplate();
+ return false;
+
+ case tt.doubleColon: {
+ next();
+ parseNoCallExpr();
+ return false;
+ }
+
+ case tt.hash: {
+ const code = lookaheadCharCode();
+ if (IS_IDENTIFIER_START[code] || code === charCodes.backslash) {
+ parseMaybePrivateName();
+ } else {
+ next();
+ }
+ // Smart pipeline topic reference.
+ return false;
+ }
+
+ default:
+ unexpected();
+ return false;
+ }
+}
+
+function parseMaybePrivateName() {
+ eat(tt.hash);
+ parseIdentifier();
+}
+
+function parseFunctionExpression() {
+ const functionStart = state.start;
+ parseIdentifier();
+ if (eat(tt.dot)) {
+ // function.sent
+ parseIdentifier();
+ }
+ parseFunction(functionStart, false);
+}
+
+export function parseLiteral() {
+ next();
+}
+
+export function parseParenExpression() {
+ expect(tt.parenL);
+ parseExpression();
+ expect(tt.parenR);
+}
+
+// Returns true if this was an arrow expression.
+function parseParenAndDistinguishExpression(canBeArrow) {
+ // Assume this is a normal parenthesized expression, but if we see an arrow, we'll bail and
+ // start over as a parameter list.
+ const snapshot = state.snapshot();
+
+ const startTokenIndex = state.tokens.length;
+ expect(tt.parenL);
+
+ let first = true;
+
+ while (!match(tt.parenR) && !state.error) {
+ if (first) {
+ first = false;
+ } else {
+ expect(tt.comma);
+ if (match(tt.parenR)) {
+ break;
+ }
+ }
+
+ if (match(tt.ellipsis)) {
+ parseRest(false /* isBlockScope */);
+ parseParenItem();
+ break;
+ } else {
+ parseMaybeAssign(false, true);
+ }
+ }
+
+ expect(tt.parenR);
+
+ if (canBeArrow && shouldParseArrow()) {
+ const wasArrow = parseArrow();
+ if (wasArrow) {
+ // It was an arrow function this whole time, so start over and parse it as params so that we
+ // get proper token annotations.
+ state.restoreFromSnapshot(snapshot);
+ state.scopeDepth++;
+ // Don't specify a context ID because arrow functions don't need a context ID.
+ parseFunctionParams();
+ parseArrow();
+ parseArrowExpression(startTokenIndex);
+ if (state.error) {
+ // Nevermind! This must have been something that looks very much like an
+ // arrow function but where its "parameter list" isn't actually a valid
+ // parameter list. Force non-arrow parsing.
+ // See https://github.com/alangpierce/sucrase/issues/666 for an example.
+ state.restoreFromSnapshot(snapshot);
+ parseParenAndDistinguishExpression(false);
+ return false;
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
+
+function shouldParseArrow() {
+ return match(tt.colon) || !canInsertSemicolon();
+}
+
+// Returns whether there was an arrow token.
+export function parseArrow() {
+ if (isTypeScriptEnabled) {
+ return tsParseArrow();
+ } else if (isFlowEnabled) {
+ return flowParseArrow();
+ } else {
+ return eat(tt.arrow);
+ }
+}
+
+function parseParenItem() {
+ if (isTypeScriptEnabled || isFlowEnabled) {
+ typedParseParenItem();
+ }
+}
+
+// New's precedence is slightly tricky. It must allow its argument to
+// be a `[]` or dot subscript expression, but not a call — at least,
+// not without wrapping it in parentheses. Thus, it uses the noCalls
+// argument to parseSubscripts to prevent it from consuming the
+// argument list.
+function parseNew() {
+ expect(tt._new);
+ if (eat(tt.dot)) {
+ // new.target
+ parseIdentifier();
+ return;
+ }
+ parseNewCallee();
+ if (isFlowEnabled) {
+ flowStartParseNewArguments();
+ }
+ if (eat(tt.parenL)) {
+ parseExprList(tt.parenR);
+ }
+}
+
+function parseNewCallee() {
+ parseNoCallExpr();
+ eat(tt.questionDot);
+}
+
+export function parseTemplate() {
+ // Finish `, read quasi
+ nextTemplateToken();
+ // Finish quasi, read ${
+ nextTemplateToken();
+ while (!match(tt.backQuote) && !state.error) {
+ expect(tt.dollarBraceL);
+ parseExpression();
+ // Finish }, read quasi
+ nextTemplateToken();
+ // Finish quasi, read either ${ or `
+ nextTemplateToken();
+ }
+ next();
+}
+
+// Parse an object literal or binding pattern.
+export function parseObj(isPattern, isBlockScope) {
+ // Attach a context ID to the object open and close brace and each object key.
+ const contextId = getNextContextId();
+ let first = true;
+
+ next();
+ state.tokens[state.tokens.length - 1].contextId = contextId;
+
+ while (!eat(tt.braceR) && !state.error) {
+ if (first) {
+ first = false;
+ } else {
+ expect(tt.comma);
+ if (eat(tt.braceR)) {
+ break;
+ }
+ }
+
+ let isGenerator = false;
+ if (match(tt.ellipsis)) {
+ const previousIndex = state.tokens.length;
+ parseSpread();
+ if (isPattern) {
+ // Mark role when the only thing being spread over is an identifier.
+ if (state.tokens.length === previousIndex + 2) {
+ markPriorBindingIdentifier(isBlockScope);
+ }
+ if (eat(tt.braceR)) {
+ break;
+ }
+ }
+ continue;
+ }
+
+ if (!isPattern) {
+ isGenerator = eat(tt.star);
+ }
+
+ if (!isPattern && isContextual(ContextualKeyword._async)) {
+ if (isGenerator) unexpected();
+
+ parseIdentifier();
+ if (
+ match(tt.colon) ||
+ match(tt.parenL) ||
+ match(tt.braceR) ||
+ match(tt.eq) ||
+ match(tt.comma)
+ ) {
+ // This is a key called "async" rather than an async function.
+ } else {
+ if (match(tt.star)) {
+ next();
+ isGenerator = true;
+ }
+ parsePropertyName(contextId);
+ }
+ } else {
+ parsePropertyName(contextId);
+ }
+
+ parseObjPropValue(isPattern, isBlockScope, contextId);
+ }
+
+ state.tokens[state.tokens.length - 1].contextId = contextId;
+}
+
+function isGetterOrSetterMethod(isPattern) {
+ // We go off of the next and don't bother checking if the node key is actually "get" or "set".
+ // This lets us avoid generating a node, and should only make the validation worse.
+ return (
+ !isPattern &&
+ (match(tt.string) || // get "string"() {}
+ match(tt.num) || // get 1() {}
+ match(tt.bracketL) || // get ["string"]() {}
+ match(tt.name) || // get foo() {}
+ !!(state.type & TokenType.IS_KEYWORD)) // get debugger() {}
+ );
+}
+
+// Returns true if this was a method.
+function parseObjectMethod(isPattern, objectContextId) {
+ // We don't need to worry about modifiers because object methods can't have optional bodies, so
+ // the start will never be used.
+ const functionStart = state.start;
+ if (match(tt.parenL)) {
+ if (isPattern) unexpected();
+ parseMethod(functionStart, /* isConstructor */ false);
+ return true;
+ }
+
+ if (isGetterOrSetterMethod(isPattern)) {
+ parsePropertyName(objectContextId);
+ parseMethod(functionStart, /* isConstructor */ false);
+ return true;
+ }
+ return false;
+}
+
+function parseObjectProperty(isPattern, isBlockScope) {
+ if (eat(tt.colon)) {
+ if (isPattern) {
+ parseMaybeDefault(isBlockScope);
+ } else {
+ parseMaybeAssign(false);
+ }
+ return;
+ }
+
+ // Since there's no colon, we assume this is an object shorthand.
+
+ // If we're in a destructuring, we've now discovered that the key was actually an assignee, so
+ // we need to tag it as a declaration with the appropriate scope. Otherwise, we might need to
+ // transform it on access, so mark it as a normal object shorthand.
+ let identifierRole;
+ if (isPattern) {
+ if (state.scopeDepth === 0) {
+ identifierRole = IdentifierRole.ObjectShorthandTopLevelDeclaration;
+ } else if (isBlockScope) {
+ identifierRole = IdentifierRole.ObjectShorthandBlockScopedDeclaration;
+ } else {
+ identifierRole = IdentifierRole.ObjectShorthandFunctionScopedDeclaration;
+ }
+ } else {
+ identifierRole = IdentifierRole.ObjectShorthand;
+ }
+ state.tokens[state.tokens.length - 1].identifierRole = identifierRole;
+
+ // Regardless of whether we know this to be a pattern or if we're in an ambiguous context, allow
+ // parsing as if there's a default value.
+ parseMaybeDefault(isBlockScope, true);
+}
+
+function parseObjPropValue(
+ isPattern,
+ isBlockScope,
+ objectContextId,
+) {
+ if (isTypeScriptEnabled) {
+ tsStartParseObjPropValue();
+ } else if (isFlowEnabled) {
+ flowStartParseObjPropValue();
+ }
+ const wasMethod = parseObjectMethod(isPattern, objectContextId);
+ if (!wasMethod) {
+ parseObjectProperty(isPattern, isBlockScope);
+ }
+}
+
+export function parsePropertyName(objectContextId) {
+ if (isFlowEnabled) {
+ flowParseVariance();
+ }
+ if (eat(tt.bracketL)) {
+ state.tokens[state.tokens.length - 1].contextId = objectContextId;
+ parseMaybeAssign();
+ expect(tt.bracketR);
+ state.tokens[state.tokens.length - 1].contextId = objectContextId;
+ } else {
+ if (match(tt.num) || match(tt.string) || match(tt.bigint) || match(tt.decimal)) {
+ parseExprAtom();
+ } else {
+ parseMaybePrivateName();
+ }
+
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ObjectKey;
+ state.tokens[state.tokens.length - 1].contextId = objectContextId;
+ }
+}
+
+// Parse object or class method.
+export function parseMethod(functionStart, isConstructor) {
+ const funcContextId = getNextContextId();
+
+ state.scopeDepth++;
+ const startTokenIndex = state.tokens.length;
+ const allowModifiers = isConstructor; // For TypeScript parameter properties
+ parseFunctionParams(allowModifiers, funcContextId);
+ parseFunctionBodyAndFinish(functionStart, funcContextId);
+ const endTokenIndex = state.tokens.length;
+ state.scopes.push(new Scope(startTokenIndex, endTokenIndex, true));
+ state.scopeDepth--;
+}
+
+// Parse arrow function expression.
+// If the parameters are provided, they will be converted to an
+// assignable list.
+export function parseArrowExpression(startTokenIndex) {
+ parseFunctionBody(true);
+ const endTokenIndex = state.tokens.length;
+ state.scopes.push(new Scope(startTokenIndex, endTokenIndex, true));
+ state.scopeDepth--;
+}
+
+export function parseFunctionBodyAndFinish(functionStart, funcContextId = 0) {
+ if (isTypeScriptEnabled) {
+ tsParseFunctionBodyAndFinish(functionStart, funcContextId);
+ } else if (isFlowEnabled) {
+ flowParseFunctionBodyAndFinish(funcContextId);
+ } else {
+ parseFunctionBody(false, funcContextId);
+ }
+}
+
+export function parseFunctionBody(allowExpression, funcContextId = 0) {
+ const isExpression = allowExpression && !match(tt.braceL);
+
+ if (isExpression) {
+ parseMaybeAssign();
+ } else {
+ parseBlock(true /* isFunctionScope */, funcContextId);
+ }
+}
+
+// Parses a comma-separated list of expressions, and returns them as
+// an array. `close` is the token type that ends the list, and
+// `allowEmpty` can be turned on to allow subsequent commas with
+// nothing in between them to be parsed as `null` (which is needed
+// for array literals).
+
+function parseExprList(close, allowEmpty = false) {
+ let first = true;
+ while (!eat(close) && !state.error) {
+ if (first) {
+ first = false;
+ } else {
+ expect(tt.comma);
+ if (eat(close)) break;
+ }
+ parseExprListItem(allowEmpty);
+ }
+}
+
+function parseExprListItem(allowEmpty) {
+ if (allowEmpty && match(tt.comma)) {
+ // Empty item; nothing more to parse for this item.
+ } else if (match(tt.ellipsis)) {
+ parseSpread();
+ parseParenItem();
+ } else if (match(tt.question)) {
+ // Partial function application proposal.
+ next();
+ } else {
+ parseMaybeAssign(false, true);
+ }
+}
+
+// Parse the next token as an identifier.
+export function parseIdentifier() {
+ next();
+ state.tokens[state.tokens.length - 1].type = tt.name;
+}
+
+// Parses await expression inside async function.
+function parseAwait() {
+ parseMaybeUnary();
+}
+
+// Parses yield expression inside generator.
+function parseYield() {
+ next();
+ if (!match(tt.semi) && !canInsertSemicolon()) {
+ eat(tt.star);
+ parseMaybeAssign();
+ }
+}
+
+// https://github.com/tc39/proposal-js-module-blocks
+function parseModuleExpression() {
+ expectContextual(ContextualKeyword._module);
+ expect(tt.braceL);
+ // For now, just call parseBlockBody to parse the block. In the future when we
+ // implement full support, we'll want to emit scopes and possibly other
+ // information.
+ parseBlockBody(tt.braceR);
+}
diff --git a/node_modules/sucrase/dist/esm/parser/traverser/index.js b/node_modules/sucrase/dist/esm/parser/traverser/index.js
new file mode 100644
index 0000000..eb8c990
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/traverser/index.js
@@ -0,0 +1,18 @@
+
+import {nextToken, skipLineComment} from "../tokenizer/index";
+import {charCodes} from "../util/charcodes";
+import {input, state} from "./base";
+import {parseTopLevel} from "./statement";
+
+export function parseFile() {
+ // If enabled, skip leading hashbang line.
+ if (
+ state.pos === 0 &&
+ input.charCodeAt(0) === charCodes.numberSign &&
+ input.charCodeAt(1) === charCodes.exclamationMark
+ ) {
+ skipLineComment(2);
+ }
+ nextToken();
+ return parseTopLevel();
+}
diff --git a/node_modules/sucrase/dist/esm/parser/traverser/lval.js b/node_modules/sucrase/dist/esm/parser/traverser/lval.js
new file mode 100644
index 0000000..f5c4855
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/traverser/lval.js
@@ -0,0 +1,159 @@
+import {flowParseAssignableListItemTypes} from "../plugins/flow";
+import {tsParseAssignableListItemTypes, tsParseModifiers} from "../plugins/typescript";
+import {
+ eat,
+ IdentifierRole,
+ match,
+ next,
+ popTypeContext,
+ pushTypeContext,
+} from "../tokenizer/index";
+import {ContextualKeyword} from "../tokenizer/keywords";
+import {TokenType, TokenType as tt} from "../tokenizer/types";
+import {isFlowEnabled, isTypeScriptEnabled, state} from "./base";
+import {parseIdentifier, parseMaybeAssign, parseObj} from "./expression";
+import {expect, unexpected} from "./util";
+
+// Parse a spread element `...expr` (call argument / array literal position).
+export function parseSpread() {
+  next();
+  parseMaybeAssign(false);
+}
+
+// Parse a rest element `...pattern` in a binding position.
+export function parseRest(isBlockScope) {
+  next();
+  parseBindingAtom(isBlockScope);
+}
+
+// Parse an identifier and mark it as a declaration in the current scope.
+export function parseBindingIdentifier(isBlockScope) {
+  parseIdentifier();
+  markPriorBindingIdentifier(isBlockScope);
+}
+
+// Parse an identifier in an import clause and tag it so later transforms can
+// recognize imported names.
+export function parseImportedIdentifier() {
+  parseIdentifier();
+  state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportDeclaration;
+}
+
+// Assign an identifier role to the most recently parsed token, based on the
+// current scope depth and whether this is a block-scoped declaration.
+export function markPriorBindingIdentifier(isBlockScope) {
+  let identifierRole;
+  if (state.scopeDepth === 0) {
+    identifierRole = IdentifierRole.TopLevelDeclaration;
+  } else if (isBlockScope) {
+    identifierRole = IdentifierRole.BlockScopedDeclaration;
+  } else {
+    identifierRole = IdentifierRole.FunctionScopedDeclaration;
+  }
+  state.tokens[state.tokens.length - 1].identifierRole = identifierRole;
+}
+
+// Parses lvalue (assignable) atom.
+export function parseBindingAtom(isBlockScope) {
+  switch (state.type) {
+    case tt._this: {
+      // In TypeScript, "this" may be the name of a parameter, so allow it.
+      const oldIsType = pushTypeContext(0);
+      next();
+      popTypeContext(oldIsType);
+      return;
+    }
+
+    case tt._yield:
+    case tt.name: {
+      // `yield` may appear as a binding name; normalize it to a plain name
+      // token before marking it as a declaration.
+      state.type = tt.name;
+      parseBindingIdentifier(isBlockScope);
+      return;
+    }
+
+    case tt.bracketL: {
+      // Array destructuring pattern; holes (elisions) are allowed.
+      next();
+      parseBindingList(tt.bracketR, isBlockScope, true /* allowEmpty */);
+      return;
+    }
+
+    case tt.braceL:
+      // Object destructuring pattern.
+      parseObj(true, isBlockScope);
+      return;
+
+    default:
+      unexpected();
+  }
+}
+
+// Parse a comma-separated list of binding elements up to the given closing
+// token type, e.g. function parameters or array destructuring elements.
+export function parseBindingList(
+  close,
+  isBlockScope,
+  allowEmpty = false,
+  allowModifiers = false,
+  contextId = 0,
+) {
+  let first = true;
+
+  let hasRemovedComma = false;
+  const firstItemTokenIndex = state.tokens.length;
+
+  while (!eat(close) && !state.error) {
+    if (first) {
+      first = false;
+    } else {
+      expect(tt.comma);
+      state.tokens[state.tokens.length - 1].contextId = contextId;
+      // After a "this" type in TypeScript, we need to set the following comma (if any) to also be
+      // a type token so that it will be removed.
+      if (!hasRemovedComma && state.tokens[firstItemTokenIndex].isType) {
+        state.tokens[state.tokens.length - 1].isType = true;
+        hasRemovedComma = true;
+      }
+    }
+    if (allowEmpty && match(tt.comma)) {
+      // Empty item; nothing further to parse for this item.
+    } else if (eat(close)) {
+      // Trailing comma followed by the closing token.
+      break;
+    } else if (match(tt.ellipsis)) {
+      parseRest(isBlockScope);
+      parseAssignableListItemTypes();
+      // Support rest element trailing commas allowed by TypeScript <2.9.
+      eat(TokenType.comma);
+      // A rest element must be last, so the list must close here.
+      expect(close);
+      break;
+    } else {
+      parseAssignableListItem(allowModifiers, isBlockScope);
+    }
+  }
+}
+
+// Parse a single binding element, optionally preceded by TypeScript parameter
+// property modifiers (only allowed in constructor parameter lists).
+function parseAssignableListItem(allowModifiers, isBlockScope) {
+  if (allowModifiers) {
+    tsParseModifiers([
+      ContextualKeyword._public,
+      ContextualKeyword._protected,
+      ContextualKeyword._private,
+      ContextualKeyword._readonly,
+      ContextualKeyword._override,
+    ]);
+  }
+
+  // Pattern (with a possible default), then a type annotation, then a default
+  // that may follow the annotation, e.g. `x: number = 1`.
+  parseMaybeDefault(isBlockScope);
+  parseAssignableListItemTypes();
+  parseMaybeDefault(isBlockScope, true /* leftAlreadyParsed */);
+}
+
+// Parse a parameter type annotation, delegating to the enabled type plugin.
+function parseAssignableListItemTypes() {
+  if (isFlowEnabled) {
+    flowParseAssignableListItemTypes();
+  } else if (isTypeScriptEnabled) {
+    tsParseAssignableListItemTypes();
+  }
+}
+
+// Parses assignment pattern around given atom if possible.
+export function parseMaybeDefault(isBlockScope, leftAlreadyParsed = false) {
+  if (!leftAlreadyParsed) {
+    parseBindingAtom(isBlockScope);
+  }
+  if (!eat(tt.eq)) {
+    return;
+  }
+  // Record where the default-value expression ends so transforms can locate
+  // the full right-hand side from the `=` token.
+  const eqIndex = state.tokens.length - 1;
+  parseMaybeAssign();
+  state.tokens[eqIndex].rhsEndIndex = state.tokens.length;
+}
diff --git a/node_modules/sucrase/dist/esm/parser/traverser/statement.js b/node_modules/sucrase/dist/esm/parser/traverser/statement.js
new file mode 100644
index 0000000..34a6511
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/traverser/statement.js
@@ -0,0 +1,1332 @@
+/* eslint max-len: 0 */
+
+import {File} from "../index";
+import {
+ flowAfterParseClassSuper,
+ flowAfterParseVarHead,
+ flowParseExportDeclaration,
+ flowParseExportStar,
+ flowParseIdentifierStatement,
+ flowParseImportSpecifier,
+ flowParseTypeAnnotation,
+ flowParseTypeParameterDeclaration,
+ flowShouldDisallowExportDefaultSpecifier,
+ flowShouldParseExportDeclaration,
+ flowShouldParseExportStar,
+ flowStartParseFunctionParams,
+ flowStartParseImportSpecifiers,
+ flowTryParseExportDefaultExpression,
+ flowTryParseStatement,
+} from "../plugins/flow";
+import {
+ tsAfterParseClassSuper,
+ tsAfterParseVarHead,
+ tsIsDeclarationStart,
+ tsParseExportDeclaration,
+ tsParseExportSpecifier,
+ tsParseIdentifierStatement,
+ tsParseImportEqualsDeclaration,
+ tsParseImportSpecifier,
+ tsParseMaybeDecoratorArguments,
+ tsParseModifiers,
+ tsStartParseFunctionParams,
+ tsTryParseClassMemberWithIsStatic,
+ tsTryParseExport,
+ tsTryParseExportDefaultExpression,
+ tsTryParseStatementContent,
+ tsTryParseTypeAnnotation,
+ tsTryParseTypeParameters,
+} from "../plugins/typescript";
+import {
+ eat,
+ eatTypeToken,
+ IdentifierRole,
+ lookaheadType,
+ lookaheadTypeAndKeyword,
+ match,
+ next,
+ nextTokenStart,
+ nextTokenStartSince,
+ popTypeContext,
+ pushTypeContext,
+} from "../tokenizer";
+import {ContextualKeyword} from "../tokenizer/keywords";
+import {Scope} from "../tokenizer/state";
+import { TokenType as tt} from "../tokenizer/types";
+import {charCodes} from "../util/charcodes";
+import {getNextContextId, input, isFlowEnabled, isTypeScriptEnabled, state} from "./base";
+import {
+ parseCallExpressionArguments,
+ parseExprAtom,
+ parseExpression,
+ parseExprSubscripts,
+ parseFunctionBodyAndFinish,
+ parseIdentifier,
+ parseMaybeAssign,
+ parseMethod,
+ parseObj,
+ parseParenExpression,
+ parsePropertyName,
+} from "./expression";
+import {
+ parseBindingAtom,
+ parseBindingIdentifier,
+ parseBindingList,
+ parseImportedIdentifier,
+} from "./lval";
+import {
+ canInsertSemicolon,
+ eatContextual,
+ expect,
+ expectContextual,
+ hasFollowingLineBreak,
+ hasPrecedingLineBreak,
+ isContextual,
+ isLineTerminator,
+ isLookaheadContextual,
+ semicolon,
+ unexpected,
+} from "./util";
+
+// Parse the entire program and return the flat token/scope representation.
+// The outermost scope spans every token and is marked as a function scope.
+export function parseTopLevel() {
+  parseBlockBody(tt.eof);
+  state.scopes.push(new Scope(0, state.tokens.length, true));
+  if (state.scopeDepth !== 0) {
+    // Scope pushes/pops must be balanced; a mismatch indicates a parser bug.
+    throw new Error(`Invalid scope depth at end of file: ${state.scopeDepth}`);
+  }
+  return new File(state.tokens, state.scopes);
+}
+
+// Parse a single statement.
+//
+// If expecting a statement and finding a slash operator, parse a
+// regular expression literal. This is to handle cases like
+// `if (foo) /blah/.exec(foo)`, where looking at the previous token
+// does not help.
+
+export function parseStatement(declaration) {
+  if (isFlowEnabled) {
+    // Flow-specific statements (e.g. type declarations) take priority.
+    if (flowTryParseStatement()) {
+      return;
+    }
+  }
+  if (match(tt.at)) {
+    parseDecorators();
+  }
+  parseStatementContent(declaration);
+}
+
+// Dispatch on the current token type to parse one statement. `declaration`
+// controls whether declaration-only statements (function/class/let/const) are
+// allowed in this position.
+function parseStatementContent(declaration) {
+  if (isTypeScriptEnabled) {
+    if (tsTryParseStatementContent()) {
+      return;
+    }
+  }
+
+  const starttype = state.type;
+
+  // Most types of statements are recognized by the keyword they
+  // start with. Many are trivial to parse, some require a bit of
+  // complexity.
+
+  switch (starttype) {
+    case tt._break:
+    case tt._continue:
+      parseBreakContinueStatement();
+      return;
+    case tt._debugger:
+      parseDebuggerStatement();
+      return;
+    case tt._do:
+      parseDoStatement();
+      return;
+    case tt._for:
+      parseForStatement();
+      return;
+    case tt._function:
+      // `function.sent` and similar meta-properties parse as expressions.
+      if (lookaheadType() === tt.dot) break;
+      if (!declaration) unexpected();
+      parseFunctionStatement();
+      return;
+
+    case tt._class:
+      if (!declaration) unexpected();
+      parseClass(true);
+      return;
+
+    case tt._if:
+      parseIfStatement();
+      return;
+    case tt._return:
+      parseReturnStatement();
+      return;
+    case tt._switch:
+      parseSwitchStatement();
+      return;
+    case tt._throw:
+      parseThrowStatement();
+      return;
+    case tt._try:
+      parseTryStatement();
+      return;
+
+    case tt._let:
+    case tt._const:
+      if (!declaration) unexpected(); // NOTE: falls through to _var
+
+    case tt._var:
+      parseVarStatement(starttype !== tt._var);
+      return;
+
+    case tt._while:
+      parseWhileStatement();
+      return;
+    case tt.braceL:
+      parseBlock();
+      return;
+    case tt.semi:
+      parseEmptyStatement();
+      return;
+    case tt._export:
+    case tt._import: {
+      // `import(...)` and `import.meta` are expressions, not declarations.
+      const nextType = lookaheadType();
+      if (nextType === tt.parenL || nextType === tt.dot) {
+        break;
+      }
+      next();
+      if (starttype === tt._import) {
+        parseImport();
+      } else {
+        parseExport();
+      }
+      return;
+    }
+    case tt.name:
+      if (state.contextualKeyword === ContextualKeyword._async) {
+        const functionStart = state.start;
+        // peek ahead and see if next token is a function
+        const snapshot = state.snapshot();
+        next();
+        if (match(tt._function) && !canInsertSemicolon()) {
+          expect(tt._function);
+          parseFunction(functionStart, true);
+          return;
+        } else {
+          state.restoreFromSnapshot(snapshot);
+        }
+      } else if (
+        state.contextualKeyword === ContextualKeyword._using &&
+        !hasFollowingLineBreak() &&
+        // Statements like `using[0]` and `using in foo` aren't actual using
+        // declarations.
+        lookaheadType() === tt.name
+      ) {
+        parseVarStatement(true);
+        return;
+      } else if (startsAwaitUsing()) {
+        expectContextual(ContextualKeyword._await);
+        parseVarStatement(true);
+        return;
+      }
+      // NOTE: intentional fall-through to the expression-statement path below.
+    default:
+      // Do nothing.
+      break;
+  }
+
+  // If the statement does not start with a statement keyword or a
+  // brace, it's an ExpressionStatement or LabeledStatement. We
+  // simply start parsing an expression, and afterwards, if the
+  // next token is a colon and the expression was a simple
+  // Identifier node, we switch to interpreting it as a label.
+  const initialTokensLength = state.tokens.length;
+  parseExpression();
+  let simpleName = null;
+  if (state.tokens.length === initialTokensLength + 1) {
+    const token = state.tokens[state.tokens.length - 1];
+    if (token.type === tt.name) {
+      simpleName = token.contextualKeyword;
+    }
+  }
+  if (simpleName == null) {
+    semicolon();
+    return;
+  }
+  if (eat(tt.colon)) {
+    parseLabeledStatement();
+  } else {
+    // This was an identifier, so we might want to handle flow/typescript-specific cases.
+    parseIdentifierStatement(simpleName);
+  }
+}
+
+/**
+ * Determine if we're positioned at an `await using` declaration.
+ *
+ * Note that this can happen either in place of a regular variable declaration
+ * or in a loop body, and in both places, there are similar-looking cases where
+ * we need to return false.
+ *
+ * Examples returning true:
+ * await using foo = bar();
+ * for (await using a of b) {}
+ *
+ * Examples returning false:
+ * await using
+ * await using + 1
+ * await using instanceof T
+ * for (await using;;) {}
+ *
+ * For now, we early return if we don't see `await`, then do a simple
+ * backtracking-based lookahead for the `using` and identifier tokens. In the
+ * future, this could be optimized with a character-based approach.
+ */
+function startsAwaitUsing() {
+  if (!isContextual(ContextualKeyword._await)) {
+    return false;
+  }
+  const snapshot = state.snapshot();
+  // await
+  next();
+  if (!isContextual(ContextualKeyword._using) || hasPrecedingLineBreak()) {
+    state.restoreFromSnapshot(snapshot);
+    return false;
+  }
+  // using
+  next();
+  if (!match(tt.name) || hasPrecedingLineBreak()) {
+    state.restoreFromSnapshot(snapshot);
+    return false;
+  }
+  // Always restore: this is a pure lookahead and consumes nothing.
+  state.restoreFromSnapshot(snapshot);
+  return true;
+}
+
+// Parse a run of consecutive decorators (`@dec1 @dec2 ...`).
+export function parseDecorators() {
+  while (match(tt.at)) {
+    parseDecorator();
+  }
+}
+
+// Parse one decorator: either `@(expression)` or a dotted identifier chain
+// with optional call arguments, e.g. `@a.b.c(...)`.
+function parseDecorator() {
+  next();
+  if (eat(tt.parenL)) {
+    parseExpression();
+    expect(tt.parenR);
+  } else {
+    parseIdentifier();
+    while (eat(tt.dot)) {
+      parseIdentifier();
+    }
+    parseMaybeDecoratorArguments();
+  }
+}
+
+// Dispatch decorator-argument parsing to the TS plugin when enabled.
+function parseMaybeDecoratorArguments() {
+  if (isTypeScriptEnabled) {
+    tsParseMaybeDecoratorArguments();
+  } else {
+    baseParseMaybeDecoratorArguments();
+  }
+}
+
+// Non-TS path: decorator arguments are just an optional call.
+export function baseParseMaybeDecoratorArguments() {
+  if (eat(tt.parenL)) {
+    parseCallExpressionArguments();
+  }
+}
+
+// `break` / `continue`, with an optional label (subject to ASI rules).
+function parseBreakContinueStatement() {
+  next();
+  if (!isLineTerminator()) {
+    parseIdentifier();
+    semicolon();
+  }
+}
+
+function parseDebuggerStatement() {
+  next();
+  semicolon();
+}
+
+// `do ... while (...)`; the trailing semicolon is optional.
+function parseDoStatement() {
+  next();
+  parseStatement(false);
+  expect(tt._while);
+  parseParenExpression();
+  eat(tt.semi);
+}
+
+// `for` loops introduce their own block scope covering the head and body.
+function parseForStatement() {
+  state.scopeDepth++;
+  const startTokenIndex = state.tokens.length;
+  parseAmbiguousForStatement();
+  const endTokenIndex = state.tokens.length;
+  state.scopes.push(new Scope(startTokenIndex, endTokenIndex, false));
+  state.scopeDepth--;
+}
+
+/**
+ * Determine if this token is a `using` declaration (explicit resource
+ * management) as part of a loop.
+ * https://github.com/tc39/proposal-explicit-resource-management
+ */
+function isUsingInLoop() {
+  if (!isContextual(ContextualKeyword._using)) {
+    return false;
+  }
+  // This must be `for (using of`, where `using` is the name of the loop
+  // variable.
+  if (isLookaheadContextual(ContextualKeyword._of)) {
+    return false;
+  }
+  return true;
+}
+
+// Disambiguating between a `for` and a `for`/`in` or `for`/`of`
+// loop is non-trivial. Basically, we have to parse the init `var`
+// statement or expression, disallowing the `in` operator (see
+// the second parameter to `parseExpression`), and then check
+// whether the next token is `in` or `of`. When there is no init
+// part (semicolon immediately after the opening parenthesis), it
+// is a regular `for` loop.
+function parseAmbiguousForStatement() {
+  next();
+
+  let forAwait = false;
+  if (isContextual(ContextualKeyword._await)) {
+    forAwait = true;
+    next();
+  }
+  expect(tt.parenL);
+
+  if (match(tt.semi)) {
+    // `for (;;)` — no init; `for await` requires an `of` head.
+    if (forAwait) {
+      unexpected();
+    }
+    parseFor();
+    return;
+  }
+
+  const isAwaitUsing = startsAwaitUsing();
+  if (isAwaitUsing || match(tt._var) || match(tt._let) || match(tt._const) || isUsingInLoop()) {
+    if (isAwaitUsing) {
+      expectContextual(ContextualKeyword._await);
+    }
+    next();
+    parseVar(true, state.type !== tt._var);
+    if (match(tt._in) || isContextual(ContextualKeyword._of)) {
+      parseForIn(forAwait);
+      return;
+    }
+    parseFor();
+    return;
+  }
+
+  // Expression init: disallow `in` so it can still signal a for-in head.
+  parseExpression(true);
+  if (match(tt._in) || isContextual(ContextualKeyword._of)) {
+    parseForIn(forAwait);
+    return;
+  }
+  if (forAwait) {
+    unexpected();
+  }
+  parseFor();
+}
+
+function parseFunctionStatement() {
+  const functionStart = state.start;
+  next();
+  parseFunction(functionStart, true);
+}
+
+function parseIfStatement() {
+  next();
+  parseParenExpression();
+  parseStatement(false);
+  if (eat(tt._else)) {
+    parseStatement(false);
+  }
+}
+
+function parseReturnStatement() {
+  next();
+
+  // In `return` (and `break`/`continue`), the keywords with
+  // optional arguments, we eagerly look for a semicolon or the
+  // possibility to insert one.
+
+  if (!isLineTerminator()) {
+    parseExpression();
+    semicolon();
+  }
+}
+
+// `switch` gets its own block scope so `let`/`const` in cases resolve there.
+function parseSwitchStatement() {
+  next();
+  parseParenExpression();
+  state.scopeDepth++;
+  const startTokenIndex = state.tokens.length;
+  expect(tt.braceL);
+
+  // Don't bother validation; just go through any sequence of cases, defaults, and statements.
+  while (!match(tt.braceR) && !state.error) {
+    if (match(tt._case) || match(tt._default)) {
+      const isCase = match(tt._case);
+      next();
+      if (isCase) {
+        parseExpression();
+      }
+      expect(tt.colon);
+    } else {
+      parseStatement(true);
+    }
+  }
+  next(); // Closing brace
+  const endTokenIndex = state.tokens.length;
+  state.scopes.push(new Scope(startTokenIndex, endTokenIndex, false));
+  state.scopeDepth--;
+}
+
+function parseThrowStatement() {
+  next();
+  parseExpression();
+  semicolon();
+}
+
+// Parse the binding in `catch (<param>)`, with an optional TS type annotation.
+function parseCatchClauseParam() {
+  parseBindingAtom(true /* isBlockScope */);
+
+  if (isTypeScriptEnabled) {
+    tsTryParseTypeAnnotation();
+  }
+}
+
+// `try { } catch (e) { } finally { }`; the catch binding is optional.
+function parseTryStatement() {
+  next();
+
+  parseBlock();
+
+  if (match(tt._catch)) {
+    next();
+    let catchBindingStartTokenIndex = null;
+    if (match(tt.parenL)) {
+      state.scopeDepth++;
+      catchBindingStartTokenIndex = state.tokens.length;
+      expect(tt.parenL);
+      parseCatchClauseParam();
+      expect(tt.parenR);
+    }
+    parseBlock();
+    if (catchBindingStartTokenIndex != null) {
+      // We need a special scope for the catch binding which includes the binding itself and the
+      // catch block.
+      const endTokenIndex = state.tokens.length;
+      state.scopes.push(new Scope(catchBindingStartTokenIndex, endTokenIndex, false));
+      state.scopeDepth--;
+    }
+  }
+  if (eat(tt._finally)) {
+    parseBlock();
+  }
+}
+
+// Parse a `var`/`let`/`const`/`using` statement after its introducing keyword.
+export function parseVarStatement(isBlockScope) {
+  next();
+  parseVar(false, isBlockScope);
+  semicolon();
+}
+
+function parseWhileStatement() {
+  next();
+  parseParenExpression();
+  parseStatement(false);
+}
+
+function parseEmptyStatement() {
+  next();
+}
+
+// The label and colon were already consumed; only the body remains.
+function parseLabeledStatement() {
+  parseStatement(true);
+}
+
+/**
+ * Parse a statement starting with an identifier of the given name. Subclasses match on the name
+ * to handle statements like "declare".
+ */
+function parseIdentifierStatement(contextualKeyword) {
+  if (isTypeScriptEnabled) {
+    tsParseIdentifierStatement(contextualKeyword);
+  } else if (isFlowEnabled) {
+    flowParseIdentifierStatement(contextualKeyword);
+  } else {
+    semicolon();
+  }
+}
+
+// Parse a semicolon-enclosed block of statements.
+export function parseBlock(isFunctionScope = false, contextId = 0) {
+  const startTokenIndex = state.tokens.length;
+  state.scopeDepth++;
+  expect(tt.braceL);
+  // Tag both braces with the context ID so transforms can find block bounds.
+  if (contextId) {
+    state.tokens[state.tokens.length - 1].contextId = contextId;
+  }
+  parseBlockBody(tt.braceR);
+  if (contextId) {
+    state.tokens[state.tokens.length - 1].contextId = contextId;
+  }
+  const endTokenIndex = state.tokens.length;
+  state.scopes.push(new Scope(startTokenIndex, endTokenIndex, isFunctionScope));
+  state.scopeDepth--;
+}
+
+// Parse statements until the given end token; bail out on parse errors so a
+// malformed file cannot cause an infinite loop.
+export function parseBlockBody(end) {
+  while (!eat(end) && !state.error) {
+    parseStatement(true);
+  }
+}
+
+// Parse a regular `for` loop. The disambiguation code in
+// `parseStatement` will already have parsed the init statement or
+// expression.
+
+function parseFor() {
+  expect(tt.semi);
+  if (!match(tt.semi)) {
+    parseExpression();
+  }
+  expect(tt.semi);
+  if (!match(tt.parenR)) {
+    parseExpression();
+  }
+  expect(tt.parenR);
+  parseStatement(false);
+}
+
+// Parse a `for`/`in` and `for`/`of` loop, which are almost
+// same from parser's perspective.
+
+function parseForIn(forAwait) {
+  if (forAwait) {
+    // `for await` only permits `of`, never `in`.
+    eatContextual(ContextualKeyword._of);
+  } else {
+    next();
+  }
+  parseExpression();
+  expect(tt.parenR);
+  parseStatement(false);
+}
+
+// Parse a list of variable declarations.
+
+function parseVar(isFor, isBlockScope) {
+  while (true) {
+    parseVarHead(isBlockScope);
+    if (eat(tt.eq)) {
+      // Record where the initializer ends, measured from the `=` token.
+      const eqIndex = state.tokens.length - 1;
+      parseMaybeAssign(isFor);
+      state.tokens[eqIndex].rhsEndIndex = state.tokens.length;
+    }
+    if (!eat(tt.comma)) {
+      break;
+    }
+  }
+}
+
+// Parse one declarator's binding pattern plus any plugin-specific suffix
+// (e.g. a TS/Flow type annotation).
+function parseVarHead(isBlockScope) {
+  parseBindingAtom(isBlockScope);
+  if (isTypeScriptEnabled) {
+    tsAfterParseVarHead();
+  } else if (isFlowEnabled) {
+    flowAfterParseVarHead();
+  }
+}
+
+// Parse a function declaration or literal (depending on the
+// `isStatement` parameter).
+
+export function parseFunction(
+  functionStart,
+  isStatement,
+  optionalId = false,
+) {
+  // Generator star, if any.
+  if (match(tt.star)) {
+    next();
+  }
+
+  // Function declarations require a name unless the caller allows omitting it
+  // (e.g. `export default function () {}`).
+  if (isStatement && !optionalId && !match(tt.name) && !match(tt._yield)) {
+    unexpected();
+  }
+
+  let nameScopeStartTokenIndex = null;
+
+  if (match(tt.name)) {
+    // Expression-style functions should limit their name's scope to the function body, so we make
+    // a new function scope to enforce that.
+    if (!isStatement) {
+      nameScopeStartTokenIndex = state.tokens.length;
+      state.scopeDepth++;
+    }
+    parseBindingIdentifier(false);
+  }
+
+  const startTokenIndex = state.tokens.length;
+  state.scopeDepth++;
+  parseFunctionParams();
+  parseFunctionBodyAndFinish(functionStart);
+  const endTokenIndex = state.tokens.length;
+  // In addition to the block scope of the function body, we need a separate function-style scope
+  // that includes the params.
+  state.scopes.push(new Scope(startTokenIndex, endTokenIndex, true));
+  state.scopeDepth--;
+  if (nameScopeStartTokenIndex !== null) {
+    state.scopes.push(new Scope(nameScopeStartTokenIndex, endTokenIndex, true));
+    state.scopeDepth--;
+  }
+}
+
+// Parse a parenthesized parameter list, including plugin-specific prefixes
+// (e.g. type parameters) and optional TS parameter-property modifiers.
+export function parseFunctionParams(
+  allowModifiers = false,
+  funcContextId = 0,
+) {
+  if (isTypeScriptEnabled) {
+    tsStartParseFunctionParams();
+  } else if (isFlowEnabled) {
+    flowStartParseFunctionParams();
+  }
+
+  expect(tt.parenL);
+  // Tag both parens with the function's context ID for later navigation.
+  if (funcContextId) {
+    state.tokens[state.tokens.length - 1].contextId = funcContextId;
+  }
+  parseBindingList(
+    tt.parenR,
+    false /* isBlockScope */,
+    false /* allowEmpty */,
+    allowModifiers,
+    funcContextId,
+  );
+  if (funcContextId) {
+    state.tokens[state.tokens.length - 1].contextId = funcContextId;
+  }
+}
+
+// Parse a class declaration or literal (depending on the
+// `isStatement` parameter).
+
+export function parseClass(isStatement, optionalId = false) {
+  // Put a context ID on the class keyword, the open-brace, and the close-brace, so that later
+  // code can easily navigate to meaningful points on the class.
+  const contextId = getNextContextId();
+
+  next();
+  state.tokens[state.tokens.length - 1].contextId = contextId;
+  state.tokens[state.tokens.length - 1].isExpression = !isStatement;
+  // Like with functions, we declare a special "name scope" from the start of the name to the end
+  // of the class, but only with expression-style classes, to represent the fact that the name is
+  // available to the body of the class but not an outer declaration.
+  let nameScopeStartTokenIndex = null;
+  if (!isStatement) {
+    nameScopeStartTokenIndex = state.tokens.length;
+    state.scopeDepth++;
+  }
+  parseClassId(isStatement, optionalId);
+  parseClassSuper();
+  const openBraceIndex = state.tokens.length;
+  parseClassBody(contextId);
+  if (state.error) {
+    return;
+  }
+  state.tokens[openBraceIndex].contextId = contextId;
+  state.tokens[state.tokens.length - 1].contextId = contextId;
+  if (nameScopeStartTokenIndex !== null) {
+    const endTokenIndex = state.tokens.length;
+    state.scopes.push(new Scope(nameScopeStartTokenIndex, endTokenIndex, false));
+    state.scopeDepth--;
+  }
+}
+
+// After a member name, these tokens indicate a class field rather than a
+// method: `=`, `;`, `}`, `!` (TS definite assignment), or `:` (type).
+function isClassProperty() {
+  return match(tt.eq) || match(tt.semi) || match(tt.braceR) || match(tt.bang) || match(tt.colon);
+}
+
+// After a member name, `(` or `<` (type params) indicates a method.
+function isClassMethod() {
+  return match(tt.parenL) || match(tt.lessThan);
+}
+
+function parseClassBody(classContextId) {
+  expect(tt.braceL);
+
+  while (!eat(tt.braceR) && !state.error) {
+    // Stray semicolons between members are allowed.
+    if (eat(tt.semi)) {
+      continue;
+    }
+
+    if (match(tt.at)) {
+      parseDecorator();
+      continue;
+    }
+    const memberStart = state.start;
+    parseClassMember(memberStart, classContextId);
+  }
+}
+
+// Parse one class member, handling TS modifiers and the several meanings of a
+// leading `static` (method/field named "static", static member, static block).
+function parseClassMember(memberStart, classContextId) {
+  if (isTypeScriptEnabled) {
+    tsParseModifiers([
+      ContextualKeyword._declare,
+      ContextualKeyword._public,
+      ContextualKeyword._protected,
+      ContextualKeyword._private,
+      ContextualKeyword._override,
+    ]);
+  }
+  let isStatic = false;
+  if (match(tt.name) && state.contextualKeyword === ContextualKeyword._static) {
+    parseIdentifier(); // eats 'static'
+    if (isClassMethod()) {
+      // A method actually named "static".
+      parseClassMethod(memberStart, /* isConstructor */ false);
+      return;
+    } else if (isClassProperty()) {
+      // A field actually named "static".
+      parseClassProperty();
+      return;
+    }
+    // otherwise something static
+    state.tokens[state.tokens.length - 1].type = tt._static;
+    isStatic = true;
+
+    if (match(tt.braceL)) {
+      // This is a static block. Mark the word "static" with the class context ID for class element
+      // detection and parse as a regular block.
+      state.tokens[state.tokens.length - 1].contextId = classContextId;
+      parseBlock();
+      return;
+    }
+  }
+
+  parseClassMemberWithIsStatic(memberStart, isStatic, classContextId);
+}
+
+// Parse a class member after any `static` keyword has been consumed:
+// generators, regular methods, fields, async methods, get/set accessors, and
+// the `accessor` keyword from the decorators proposal.
+function parseClassMemberWithIsStatic(
+  memberStart,
+  isStatic,
+  classContextId,
+) {
+  if (isTypeScriptEnabled) {
+    if (tsTryParseClassMemberWithIsStatic(isStatic)) {
+      return;
+    }
+  }
+  if (eat(tt.star)) {
+    // a generator
+    parseClassPropertyName(classContextId);
+    parseClassMethod(memberStart, /* isConstructor */ false);
+    return;
+  }
+
+  // Get the identifier name so we can tell if it's actually a keyword like "async", "get", or
+  // "set".
+  parseClassPropertyName(classContextId);
+  let isConstructor = false;
+  const token = state.tokens[state.tokens.length - 1];
+  // We allow "constructor" as either an identifier or a string.
+  if (token.contextualKeyword === ContextualKeyword._constructor) {
+    isConstructor = true;
+  }
+  parsePostMemberNameModifiers();
+
+  if (isClassMethod()) {
+    parseClassMethod(memberStart, isConstructor);
+  } else if (isClassProperty()) {
+    parseClassProperty();
+  } else if (token.contextualKeyword === ContextualKeyword._async && !isLineTerminator()) {
+    state.tokens[state.tokens.length - 1].type = tt._async;
+    // an async method
+    const isGenerator = match(tt.star);
+    if (isGenerator) {
+      next();
+    }
+
+    // The so-called parsed name would have been "async": get the real name.
+    parseClassPropertyName(classContextId);
+    parsePostMemberNameModifiers();
+    parseClassMethod(memberStart, false /* isConstructor */);
+  } else if (
+    (token.contextualKeyword === ContextualKeyword._get ||
+      token.contextualKeyword === ContextualKeyword._set) &&
+    !(isLineTerminator() && match(tt.star))
+  ) {
+    if (token.contextualKeyword === ContextualKeyword._get) {
+      state.tokens[state.tokens.length - 1].type = tt._get;
+    } else {
+      state.tokens[state.tokens.length - 1].type = tt._set;
+    }
+    // The line-break-then-star exclusion above handles `get\n*`, which is an
+    // uninitialized property named 'get' followed by a generator.
+    // a getter or setter
+    // The so-called parsed name would have been "get/set": get the real name.
+    parseClassPropertyName(classContextId);
+    parseClassMethod(memberStart, /* isConstructor */ false);
+  } else if (token.contextualKeyword === ContextualKeyword._accessor && !isLineTerminator()) {
+    // `accessor name` auto-accessor field (decorators proposal).
+    parseClassPropertyName(classContextId);
+    parseClassProperty();
+  } else if (isLineTerminator()) {
+    // an uninitialized class property (due to ASI, since we don't otherwise recognize the next token)
+    parseClassProperty();
+  } else {
+    unexpected();
+  }
+}
+
+// Parse a class method after its name: optional type parameters, then the
+// parameter list and body via parseMethod.
+function parseClassMethod(functionStart, isConstructor) {
+  if (isTypeScriptEnabled) {
+    tsTryParseTypeParameters();
+  } else if (isFlowEnabled) {
+    if (match(tt.lessThan)) {
+      flowParseTypeParameterDeclaration();
+    }
+  }
+  parseMethod(functionStart, isConstructor);
+}
+
+// Return the name of the class property, if it is a simple identifier.
+export function parseClassPropertyName(classContextId) {
+  parsePropertyName(classContextId);
+}
+
+// In TypeScript, a member name may be followed by `?` (optional member); the
+// token is marked as type syntax so it is stripped from the output.
+export function parsePostMemberNameModifiers() {
+  if (isTypeScriptEnabled) {
+    const oldIsType = pushTypeContext(0);
+    eat(tt.question);
+    popTypeContext(oldIsType);
+  }
+}
+
+// Parse a class field after its name: optional `!`/type annotation (TS) or
+// type annotation (Flow), then an optional initializer.
+export function parseClassProperty() {
+  if (isTypeScriptEnabled) {
+    // `!` is the TS definite-assignment assertion.
+    eatTypeToken(tt.bang);
+    tsTryParseTypeAnnotation();
+  } else if (isFlowEnabled) {
+    if (match(tt.colon)) {
+      flowParseTypeAnnotation();
+    }
+  }
+
+  if (match(tt.eq)) {
+    // Record where the initializer ends, measured from the `=` token.
+    const equalsTokenIndex = state.tokens.length;
+    next();
+    parseMaybeAssign();
+    state.tokens[equalsTokenIndex].rhsEndIndex = state.tokens.length;
+  }
+  semicolon();
+}
+
+// Parse the optional class name and type parameters.
+function parseClassId(isStatement, optionalId = false) {
+  if (
+    isTypeScriptEnabled &&
+    (!isStatement || optionalId) &&
+    isContextual(ContextualKeyword._implements)
+  ) {
+    // `class implements ...` in TS: there is no name to parse.
+    return;
+  }
+
+  if (match(tt.name)) {
+    parseBindingIdentifier(true);
+  }
+
+  if (isTypeScriptEnabled) {
+    tsTryParseTypeParameters();
+  } else if (isFlowEnabled) {
+    if (match(tt.lessThan)) {
+      flowParseTypeParameterDeclaration();
+    }
+  }
+}
+
+// Parse an optional `extends` clause, then let the enabled type plugin parse
+// what may follow (e.g. type arguments, `implements`).
+function parseClassSuper() {
+  let hasSuper = false;
+  if (eat(tt._extends)) {
+    parseExprSubscripts();
+    hasSuper = true;
+  } else {
+    hasSuper = false;
+  }
+  if (isTypeScriptEnabled) {
+    tsAfterParseClassSuper(hasSuper);
+  } else if (isFlowEnabled) {
+    flowAfterParseClassSuper(hasSuper);
+  }
+}
+
+// Parses module export declaration.
+
+export function parseExport() {
+  // Record the extent of the export on the `export` token itself.
+  const exportIndex = state.tokens.length - 1;
+  if (isTypeScriptEnabled) {
+    if (tsTryParseExport()) {
+      return;
+    }
+  }
+  // export * from '...'
+  if (shouldParseExportStar()) {
+    parseExportStar();
+  } else if (isExportDefaultSpecifier()) {
+    // export default from
+    parseIdentifier();
+    if (match(tt.comma) && lookaheadType() === tt.star) {
+      // `export default, * as ns from '...'`
+      expect(tt.comma);
+      expect(tt.star);
+      expectContextual(ContextualKeyword._as);
+      parseIdentifier();
+    } else {
+      parseExportSpecifiersMaybe();
+    }
+    parseExportFrom();
+  } else if (eat(tt._default)) {
+    // export default ...
+    parseExportDefaultExpression();
+  } else if (shouldParseExportDeclaration()) {
+    parseExportDeclaration();
+  } else {
+    // export { x, y as z } [from '...']
+    parseExportSpecifiers();
+    parseExportFrom();
+  }
+  state.tokens[exportIndex].rhsEndIndex = state.tokens.length;
+}
+
+// Parse what follows `export default`: a function/class declaration (possibly
+// decorated or async) or an arbitrary assignment expression.
+function parseExportDefaultExpression() {
+  if (isTypeScriptEnabled) {
+    if (tsTryParseExportDefaultExpression()) {
+      return;
+    }
+  }
+  if (isFlowEnabled) {
+    if (flowTryParseExportDefaultExpression()) {
+      return;
+    }
+  }
+  const functionStart = state.start;
+  if (eat(tt._function)) {
+    parseFunction(functionStart, true, true);
+  } else if (isContextual(ContextualKeyword._async) && lookaheadType() === tt._function) {
+    // async function declaration
+    eatContextual(ContextualKeyword._async);
+    eat(tt._function);
+    parseFunction(functionStart, true, true);
+  } else if (match(tt._class)) {
+    parseClass(true, true);
+  } else if (match(tt.at)) {
+    parseDecorators();
+    parseClass(true, true);
+  } else {
+    parseMaybeAssign();
+    semicolon();
+  }
+}
+
+// Parse the declaration following `export`, delegating to the type plugins.
+function parseExportDeclaration() {
+  if (isTypeScriptEnabled) {
+    tsParseExportDeclaration();
+  } else if (isFlowEnabled) {
+    flowParseExportDeclaration();
+  } else {
+    parseStatement(true);
+  }
+}
+
+// Detect `export default from '...'` / `export default, ...` (the export
+// default-from proposal), as opposed to a regular `export default`.
+function isExportDefaultSpecifier() {
+  if (isTypeScriptEnabled && tsIsDeclarationStart()) {
+    return false;
+  } else if (isFlowEnabled && flowShouldDisallowExportDefaultSpecifier()) {
+    return false;
+  }
+  if (match(tt.name)) {
+    // `export async ...` is a declaration, not a default-from specifier.
+    return state.contextualKeyword !== ContextualKeyword._async;
+  }
+
+  if (!match(tt._default)) {
+    return false;
+  }
+
+  const _next = nextTokenStart();
+  const lookahead = lookaheadTypeAndKeyword();
+  const hasFrom =
+    lookahead.type === tt.name && lookahead.contextualKeyword === ContextualKeyword._from;
+  if (lookahead.type === tt.comma) {
+    return true;
+  }
+  // lookahead again when `export default from` is seen
+  if (hasFrom) {
+    // Skip past the 4-character word "from" and check for a string literal.
+    const nextAfterFrom = input.charCodeAt(nextTokenStartSince(_next + 4));
+    return nextAfterFrom === charCodes.quotationMark || nextAfterFrom === charCodes.apostrophe;
+  }
+  return false;
+}
+
+// If a comma follows, parse an additional `{ ... }` specifier list.
+function parseExportSpecifiersMaybe() {
+  if (eat(tt.comma)) {
+    parseExportSpecifiers();
+  }
+}
+
+// Parse an optional `from '...'` clause (with optional import attributes),
+// then the statement-ending semicolon.
+export function parseExportFrom() {
+  if (eatContextual(ContextualKeyword._from)) {
+    parseExprAtom();
+    maybeParseImportAttributes();
+  }
+  semicolon();
+}
+
+function shouldParseExportStar() {
+  if (isFlowEnabled) {
+    return flowShouldParseExportStar();
+  } else {
+    return match(tt.star);
+  }
+}
+
+function parseExportStar() {
+  if (isFlowEnabled) {
+    flowParseExportStar();
+  } else {
+    baseParseExportStar();
+  }
+}
+
+// `export * from '...'` or `export * as ns from '...'`.
+export function baseParseExportStar() {
+  expect(tt.star);
+
+  if (isContextual(ContextualKeyword._as)) {
+    parseExportNamespace();
+  } else {
+    parseExportFrom();
+  }
+}
+
+// `export * as ns [, { ... }] from '...'`.
+function parseExportNamespace() {
+  next();
+  // Retype the contextual `as` so transforms can recognize it.
+  state.tokens[state.tokens.length - 1].type = tt._as;
+  parseIdentifier();
+  parseExportSpecifiersMaybe();
+  parseExportFrom();
+}
+
+// True when the token after `export` starts a declaration (var/const/let,
+// function, class, async function, decorator, or a plugin declaration).
+function shouldParseExportDeclaration() {
+  return (
+    (isTypeScriptEnabled && tsIsDeclarationStart()) ||
+    (isFlowEnabled && flowShouldParseExportDeclaration()) ||
+    state.type === tt._var ||
+    state.type === tt._const ||
+    state.type === tt._let ||
+    state.type === tt._function ||
+    state.type === tt._class ||
+    isContextual(ContextualKeyword._async) ||
+    match(tt.at)
+  );
+}
+
+// Parses a comma-separated list of module exports.
+export function parseExportSpecifiers() {
+  let first = true;
+
+  // export { x, y as z } [from '...']
+  expect(tt.braceL);
+
+  while (!eat(tt.braceR) && !state.error) {
+    if (first) {
+      first = false;
+    } else {
+      expect(tt.comma);
+      // Trailing comma before the closing brace.
+      if (eat(tt.braceR)) {
+        break;
+      }
+    }
+    parseExportSpecifier();
+  }
+}
+
+// Parse one `name` or `name as alias` specifier and tag the exported name so
+// later transforms can resolve it.
+function parseExportSpecifier() {
+  if (isTypeScriptEnabled) {
+    tsParseExportSpecifier();
+    return;
+  }
+  parseIdentifier();
+  state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ExportAccess;
+  if (eatContextual(ContextualKeyword._as)) {
+    parseIdentifier();
+  }
+}
+
+/**
+ * Starting at the `module` token in an import, determine if it was truly an
+ * import reflection token or just looks like one.
+ *
+ * Returns true for:
+ * import module foo from "foo";
+ * import module from from "foo";
+ *
+ * Returns false for:
+ * import module from "foo";
+ * import module, {bar} from "foo";
+ */
+function isImportReflection() {
+ const snapshot = state.snapshot();
+ expectContextual(ContextualKeyword._module);
+ if (eatContextual(ContextualKeyword._from)) {
+ if (isContextual(ContextualKeyword._from)) {
+ state.restoreFromSnapshot(snapshot);
+ return true;
+ } else {
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ }
+ } else if (match(tt.comma)) {
+ state.restoreFromSnapshot(snapshot);
+ return false;
+ } else {
+ state.restoreFromSnapshot(snapshot);
+ return true;
+ }
+}
+
+/**
+ * Eat the "module" token from the import reflection proposal.
+ * https://github.com/tc39/proposal-import-reflection
+ */
+function parseMaybeImportReflection() {
+ // isImportReflection does snapshot/restore, so only run it if we see the word
+ // "module".
+ if (isContextual(ContextualKeyword._module) && isImportReflection()) {
+ next();
+ }
+}
+
+// Parses import declaration.
+
+export function parseImport() {
+ if (isTypeScriptEnabled && match(tt.name) && lookaheadType() === tt.eq) {
+ tsParseImportEqualsDeclaration();
+ return;
+ }
+ if (isTypeScriptEnabled && isContextual(ContextualKeyword._type)) {
+ const lookahead = lookaheadTypeAndKeyword();
+ if (lookahead.type === tt.name && lookahead.contextualKeyword !== ContextualKeyword._from) {
+ // One of these `import type` cases:
+ // import type T = require('T');
+ // import type A from 'A';
+ expectContextual(ContextualKeyword._type);
+ if (lookaheadType() === tt.eq) {
+ tsParseImportEqualsDeclaration();
+ return;
+ }
+ // If this is an `import type...from` statement, then we already ate the
+ // type token, so proceed to the regular import parser.
+ } else if (lookahead.type === tt.star || lookahead.type === tt.braceL) {
+ // One of these `import type` cases, in which case we can eat the type token
+ // and proceed as normal:
+ // import type * as A from 'A';
+ // import type {a} from 'A';
+ expectContextual(ContextualKeyword._type);
+ }
+ // Otherwise, we are importing the name "type".
+ }
+
+ // import '...'
+ if (match(tt.string)) {
+ parseExprAtom();
+ } else {
+ parseMaybeImportReflection();
+ parseImportSpecifiers();
+ expectContextual(ContextualKeyword._from);
+ parseExprAtom();
+ }
+ maybeParseImportAttributes();
+ semicolon();
+}
+
+// eslint-disable-next-line no-unused-vars
+function shouldParseDefaultImport() {
+ return match(tt.name);
+}
+
+function parseImportSpecifierLocal() {
+ parseImportedIdentifier();
+}
+
+// Parses a comma-separated list of module imports.
+function parseImportSpecifiers() {
+ if (isFlowEnabled) {
+ flowStartParseImportSpecifiers();
+ }
+
+ let first = true;
+ if (shouldParseDefaultImport()) {
+ // import defaultObj, { x, y as z } from '...'
+ parseImportSpecifierLocal();
+
+ if (!eat(tt.comma)) return;
+ }
+
+ if (match(tt.star)) {
+ next();
+ expectContextual(ContextualKeyword._as);
+
+ parseImportSpecifierLocal();
+
+ return;
+ }
+
+ expect(tt.braceL);
+ while (!eat(tt.braceR) && !state.error) {
+ if (first) {
+ first = false;
+ } else {
+ // Detect an attempt to deep destructure
+ if (eat(tt.colon)) {
+ unexpected(
+ "ES2015 named imports do not destructure. Use another statement for destructuring after the import.",
+ );
+ }
+
+ expect(tt.comma);
+ if (eat(tt.braceR)) {
+ break;
+ }
+ }
+
+ parseImportSpecifier();
+ }
+}
+
+function parseImportSpecifier() {
+ if (isTypeScriptEnabled) {
+ tsParseImportSpecifier();
+ return;
+ }
+ if (isFlowEnabled) {
+ flowParseImportSpecifier();
+ return;
+ }
+ parseImportedIdentifier();
+ if (isContextual(ContextualKeyword._as)) {
+ state.tokens[state.tokens.length - 1].identifierRole = IdentifierRole.ImportAccess;
+ next();
+ parseImportedIdentifier();
+ }
+}
+
+/**
+ * Parse import attributes like `with {type: "json"}`, or the legacy form
+ * `assert {type: "json"}`.
+ *
+ * Import attributes technically have their own syntax, but are always parseable
+ * as a plain JS object, so just do that for simplicity.
+ */
+function maybeParseImportAttributes() {
+ if (match(tt._with) || (isContextual(ContextualKeyword._assert) && !hasPrecedingLineBreak())) {
+ next();
+ parseObj(false, false);
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/parser/traverser/util.js b/node_modules/sucrase/dist/esm/parser/traverser/util.js
new file mode 100644
index 0000000..6a2b2d9
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/traverser/util.js
@@ -0,0 +1,104 @@
+import {eat, finishToken, lookaheadTypeAndKeyword, match, nextTokenStart} from "../tokenizer/index";
+
+import {formatTokenType, TokenType as tt} from "../tokenizer/types";
+import {charCodes} from "../util/charcodes";
+import {input, state} from "./base";
+
+// ## Parser utilities
+
+// Tests whether parsed token is a contextual keyword.
+export function isContextual(contextualKeyword) {
+ return state.contextualKeyword === contextualKeyword;
+}
+
+export function isLookaheadContextual(contextualKeyword) {
+ const l = lookaheadTypeAndKeyword();
+ return l.type === tt.name && l.contextualKeyword === contextualKeyword;
+}
+
+// Consumes contextual keyword if possible.
+export function eatContextual(contextualKeyword) {
+ return state.contextualKeyword === contextualKeyword && eat(tt.name);
+}
+
+// Asserts that following token is given contextual keyword.
+export function expectContextual(contextualKeyword) {
+ if (!eatContextual(contextualKeyword)) {
+ unexpected();
+ }
+}
+
+// Test whether a semicolon can be inserted at the current position.
+export function canInsertSemicolon() {
+ return match(tt.eof) || match(tt.braceR) || hasPrecedingLineBreak();
+}
+
+export function hasPrecedingLineBreak() {
+ const prevToken = state.tokens[state.tokens.length - 1];
+ const lastTokEnd = prevToken ? prevToken.end : 0;
+ for (let i = lastTokEnd; i < state.start; i++) {
+ const code = input.charCodeAt(i);
+ if (
+ code === charCodes.lineFeed ||
+ code === charCodes.carriageReturn ||
+ code === 0x2028 ||
+ code === 0x2029
+ ) {
+ return true;
+ }
+ }
+ return false;
+}
+
+export function hasFollowingLineBreak() {
+ const nextStart = nextTokenStart();
+ for (let i = state.end; i < nextStart; i++) {
+ const code = input.charCodeAt(i);
+ if (
+ code === charCodes.lineFeed ||
+ code === charCodes.carriageReturn ||
+ code === 0x2028 ||
+ code === 0x2029
+ ) {
+ return true;
+ }
+ }
+ return false;
+}
+
+export function isLineTerminator() {
+ return eat(tt.semi) || canInsertSemicolon();
+}
+
+// Consume a semicolon, or, failing that, see if we are allowed to
+// pretend that there is a semicolon at this position.
+export function semicolon() {
+ if (!isLineTerminator()) {
+ unexpected('Unexpected token, expected ";"');
+ }
+}
+
+// Expect a token of a given type. If found, consume it, otherwise,
+// raise an unexpected token error at given pos.
+export function expect(type) {
+ const matched = eat(type);
+ if (!matched) {
+ unexpected(`Unexpected token, expected "${formatTokenType(type)}"`);
+ }
+}
+
+/**
+ * Transition the parser to an error state. All code needs to be written to naturally unwind in this
+ * state, which allows us to backtrack without exceptions and without error plumbing everywhere.
+ */
+export function unexpected(message = "Unexpected token", pos = state.start) {
+ if (state.error) {
+ return;
+ }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const err = new SyntaxError(message);
+ err.pos = pos;
+ state.error = err;
+ state.pos = input.length;
+ finishToken(tt.eof);
+}
diff --git a/node_modules/sucrase/dist/esm/parser/util/charcodes.js b/node_modules/sucrase/dist/esm/parser/util/charcodes.js
new file mode 100644
index 0000000..36ea667
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/util/charcodes.js
@@ -0,0 +1,115 @@
+export var charCodes; (function (charCodes) {
+ const backSpace = 8; charCodes[charCodes["backSpace"] = backSpace] = "backSpace";
+ const lineFeed = 10; charCodes[charCodes["lineFeed"] = lineFeed] = "lineFeed"; // '\n'
+ const tab = 9; charCodes[charCodes["tab"] = tab] = "tab"; // '\t'
+ const carriageReturn = 13; charCodes[charCodes["carriageReturn"] = carriageReturn] = "carriageReturn"; // '\r'
+ const shiftOut = 14; charCodes[charCodes["shiftOut"] = shiftOut] = "shiftOut";
+ const space = 32; charCodes[charCodes["space"] = space] = "space";
+ const exclamationMark = 33; charCodes[charCodes["exclamationMark"] = exclamationMark] = "exclamationMark"; // '!'
+ const quotationMark = 34; charCodes[charCodes["quotationMark"] = quotationMark] = "quotationMark"; // '"'
+ const numberSign = 35; charCodes[charCodes["numberSign"] = numberSign] = "numberSign"; // '#'
+ const dollarSign = 36; charCodes[charCodes["dollarSign"] = dollarSign] = "dollarSign"; // '$'
+ const percentSign = 37; charCodes[charCodes["percentSign"] = percentSign] = "percentSign"; // '%'
+ const ampersand = 38; charCodes[charCodes["ampersand"] = ampersand] = "ampersand"; // '&'
+ const apostrophe = 39; charCodes[charCodes["apostrophe"] = apostrophe] = "apostrophe"; // '''
+ const leftParenthesis = 40; charCodes[charCodes["leftParenthesis"] = leftParenthesis] = "leftParenthesis"; // '('
+ const rightParenthesis = 41; charCodes[charCodes["rightParenthesis"] = rightParenthesis] = "rightParenthesis"; // ')'
+ const asterisk = 42; charCodes[charCodes["asterisk"] = asterisk] = "asterisk"; // '*'
+ const plusSign = 43; charCodes[charCodes["plusSign"] = plusSign] = "plusSign"; // '+'
+ const comma = 44; charCodes[charCodes["comma"] = comma] = "comma"; // ','
+ const dash = 45; charCodes[charCodes["dash"] = dash] = "dash"; // '-'
+ const dot = 46; charCodes[charCodes["dot"] = dot] = "dot"; // '.'
+ const slash = 47; charCodes[charCodes["slash"] = slash] = "slash"; // '/'
+ const digit0 = 48; charCodes[charCodes["digit0"] = digit0] = "digit0"; // '0'
+ const digit1 = 49; charCodes[charCodes["digit1"] = digit1] = "digit1"; // '1'
+ const digit2 = 50; charCodes[charCodes["digit2"] = digit2] = "digit2"; // '2'
+ const digit3 = 51; charCodes[charCodes["digit3"] = digit3] = "digit3"; // '3'
+ const digit4 = 52; charCodes[charCodes["digit4"] = digit4] = "digit4"; // '4'
+ const digit5 = 53; charCodes[charCodes["digit5"] = digit5] = "digit5"; // '5'
+ const digit6 = 54; charCodes[charCodes["digit6"] = digit6] = "digit6"; // '6'
+ const digit7 = 55; charCodes[charCodes["digit7"] = digit7] = "digit7"; // '7'
+ const digit8 = 56; charCodes[charCodes["digit8"] = digit8] = "digit8"; // '8'
+ const digit9 = 57; charCodes[charCodes["digit9"] = digit9] = "digit9"; // '9'
+ const colon = 58; charCodes[charCodes["colon"] = colon] = "colon"; // ':'
+ const semicolon = 59; charCodes[charCodes["semicolon"] = semicolon] = "semicolon"; // ';'
+ const lessThan = 60; charCodes[charCodes["lessThan"] = lessThan] = "lessThan"; // '<'
+ const equalsTo = 61; charCodes[charCodes["equalsTo"] = equalsTo] = "equalsTo"; // '='
+ const greaterThan = 62; charCodes[charCodes["greaterThan"] = greaterThan] = "greaterThan"; // '>'
+ const questionMark = 63; charCodes[charCodes["questionMark"] = questionMark] = "questionMark"; // '?'
+ const atSign = 64; charCodes[charCodes["atSign"] = atSign] = "atSign"; // '@'
+ const uppercaseA = 65; charCodes[charCodes["uppercaseA"] = uppercaseA] = "uppercaseA"; // 'A'
+ const uppercaseB = 66; charCodes[charCodes["uppercaseB"] = uppercaseB] = "uppercaseB"; // 'B'
+ const uppercaseC = 67; charCodes[charCodes["uppercaseC"] = uppercaseC] = "uppercaseC"; // 'C'
+ const uppercaseD = 68; charCodes[charCodes["uppercaseD"] = uppercaseD] = "uppercaseD"; // 'D'
+ const uppercaseE = 69; charCodes[charCodes["uppercaseE"] = uppercaseE] = "uppercaseE"; // 'E'
+ const uppercaseF = 70; charCodes[charCodes["uppercaseF"] = uppercaseF] = "uppercaseF"; // 'F'
+ const uppercaseG = 71; charCodes[charCodes["uppercaseG"] = uppercaseG] = "uppercaseG"; // 'G'
+ const uppercaseH = 72; charCodes[charCodes["uppercaseH"] = uppercaseH] = "uppercaseH"; // 'H'
+ const uppercaseI = 73; charCodes[charCodes["uppercaseI"] = uppercaseI] = "uppercaseI"; // 'I'
+ const uppercaseJ = 74; charCodes[charCodes["uppercaseJ"] = uppercaseJ] = "uppercaseJ"; // 'J'
+ const uppercaseK = 75; charCodes[charCodes["uppercaseK"] = uppercaseK] = "uppercaseK"; // 'K'
+ const uppercaseL = 76; charCodes[charCodes["uppercaseL"] = uppercaseL] = "uppercaseL"; // 'L'
+ const uppercaseM = 77; charCodes[charCodes["uppercaseM"] = uppercaseM] = "uppercaseM"; // 'M'
+ const uppercaseN = 78; charCodes[charCodes["uppercaseN"] = uppercaseN] = "uppercaseN"; // 'N'
+ const uppercaseO = 79; charCodes[charCodes["uppercaseO"] = uppercaseO] = "uppercaseO"; // 'O'
+ const uppercaseP = 80; charCodes[charCodes["uppercaseP"] = uppercaseP] = "uppercaseP"; // 'P'
+ const uppercaseQ = 81; charCodes[charCodes["uppercaseQ"] = uppercaseQ] = "uppercaseQ"; // 'Q'
+ const uppercaseR = 82; charCodes[charCodes["uppercaseR"] = uppercaseR] = "uppercaseR"; // 'R'
+ const uppercaseS = 83; charCodes[charCodes["uppercaseS"] = uppercaseS] = "uppercaseS"; // 'S'
+ const uppercaseT = 84; charCodes[charCodes["uppercaseT"] = uppercaseT] = "uppercaseT"; // 'T'
+ const uppercaseU = 85; charCodes[charCodes["uppercaseU"] = uppercaseU] = "uppercaseU"; // 'U'
+ const uppercaseV = 86; charCodes[charCodes["uppercaseV"] = uppercaseV] = "uppercaseV"; // 'V'
+ const uppercaseW = 87; charCodes[charCodes["uppercaseW"] = uppercaseW] = "uppercaseW"; // 'W'
+ const uppercaseX = 88; charCodes[charCodes["uppercaseX"] = uppercaseX] = "uppercaseX"; // 'X'
+ const uppercaseY = 89; charCodes[charCodes["uppercaseY"] = uppercaseY] = "uppercaseY"; // 'Y'
+ const uppercaseZ = 90; charCodes[charCodes["uppercaseZ"] = uppercaseZ] = "uppercaseZ"; // 'Z'
+ const leftSquareBracket = 91; charCodes[charCodes["leftSquareBracket"] = leftSquareBracket] = "leftSquareBracket"; // '['
+ const backslash = 92; charCodes[charCodes["backslash"] = backslash] = "backslash"; // '\ '
+ const rightSquareBracket = 93; charCodes[charCodes["rightSquareBracket"] = rightSquareBracket] = "rightSquareBracket"; // ']'
+ const caret = 94; charCodes[charCodes["caret"] = caret] = "caret"; // '^'
+ const underscore = 95; charCodes[charCodes["underscore"] = underscore] = "underscore"; // '_'
+ const graveAccent = 96; charCodes[charCodes["graveAccent"] = graveAccent] = "graveAccent"; // '`'
+ const lowercaseA = 97; charCodes[charCodes["lowercaseA"] = lowercaseA] = "lowercaseA"; // 'a'
+ const lowercaseB = 98; charCodes[charCodes["lowercaseB"] = lowercaseB] = "lowercaseB"; // 'b'
+ const lowercaseC = 99; charCodes[charCodes["lowercaseC"] = lowercaseC] = "lowercaseC"; // 'c'
+ const lowercaseD = 100; charCodes[charCodes["lowercaseD"] = lowercaseD] = "lowercaseD"; // 'd'
+ const lowercaseE = 101; charCodes[charCodes["lowercaseE"] = lowercaseE] = "lowercaseE"; // 'e'
+ const lowercaseF = 102; charCodes[charCodes["lowercaseF"] = lowercaseF] = "lowercaseF"; // 'f'
+ const lowercaseG = 103; charCodes[charCodes["lowercaseG"] = lowercaseG] = "lowercaseG"; // 'g'
+ const lowercaseH = 104; charCodes[charCodes["lowercaseH"] = lowercaseH] = "lowercaseH"; // 'h'
+ const lowercaseI = 105; charCodes[charCodes["lowercaseI"] = lowercaseI] = "lowercaseI"; // 'i'
+ const lowercaseJ = 106; charCodes[charCodes["lowercaseJ"] = lowercaseJ] = "lowercaseJ"; // 'j'
+ const lowercaseK = 107; charCodes[charCodes["lowercaseK"] = lowercaseK] = "lowercaseK"; // 'k'
+ const lowercaseL = 108; charCodes[charCodes["lowercaseL"] = lowercaseL] = "lowercaseL"; // 'l'
+ const lowercaseM = 109; charCodes[charCodes["lowercaseM"] = lowercaseM] = "lowercaseM"; // 'm'
+ const lowercaseN = 110; charCodes[charCodes["lowercaseN"] = lowercaseN] = "lowercaseN"; // 'n'
+ const lowercaseO = 111; charCodes[charCodes["lowercaseO"] = lowercaseO] = "lowercaseO"; // 'o'
+ const lowercaseP = 112; charCodes[charCodes["lowercaseP"] = lowercaseP] = "lowercaseP"; // 'p'
+ const lowercaseQ = 113; charCodes[charCodes["lowercaseQ"] = lowercaseQ] = "lowercaseQ"; // 'q'
+ const lowercaseR = 114; charCodes[charCodes["lowercaseR"] = lowercaseR] = "lowercaseR"; // 'r'
+ const lowercaseS = 115; charCodes[charCodes["lowercaseS"] = lowercaseS] = "lowercaseS"; // 's'
+ const lowercaseT = 116; charCodes[charCodes["lowercaseT"] = lowercaseT] = "lowercaseT"; // 't'
+ const lowercaseU = 117; charCodes[charCodes["lowercaseU"] = lowercaseU] = "lowercaseU"; // 'u'
+ const lowercaseV = 118; charCodes[charCodes["lowercaseV"] = lowercaseV] = "lowercaseV"; // 'v'
+ const lowercaseW = 119; charCodes[charCodes["lowercaseW"] = lowercaseW] = "lowercaseW"; // 'w'
+ const lowercaseX = 120; charCodes[charCodes["lowercaseX"] = lowercaseX] = "lowercaseX"; // 'x'
+ const lowercaseY = 121; charCodes[charCodes["lowercaseY"] = lowercaseY] = "lowercaseY"; // 'y'
+ const lowercaseZ = 122; charCodes[charCodes["lowercaseZ"] = lowercaseZ] = "lowercaseZ"; // 'z'
+ const leftCurlyBrace = 123; charCodes[charCodes["leftCurlyBrace"] = leftCurlyBrace] = "leftCurlyBrace"; // '{'
+ const verticalBar = 124; charCodes[charCodes["verticalBar"] = verticalBar] = "verticalBar"; // '|'
+ const rightCurlyBrace = 125; charCodes[charCodes["rightCurlyBrace"] = rightCurlyBrace] = "rightCurlyBrace"; // '}'
+ const tilde = 126; charCodes[charCodes["tilde"] = tilde] = "tilde"; // '~'
+ const nonBreakingSpace = 160; charCodes[charCodes["nonBreakingSpace"] = nonBreakingSpace] = "nonBreakingSpace";
+ // eslint-disable-next-line no-irregular-whitespace
+ const oghamSpaceMark = 5760; charCodes[charCodes["oghamSpaceMark"] = oghamSpaceMark] = "oghamSpaceMark"; // ' '
+ const lineSeparator = 8232; charCodes[charCodes["lineSeparator"] = lineSeparator] = "lineSeparator";
+ const paragraphSeparator = 8233; charCodes[charCodes["paragraphSeparator"] = paragraphSeparator] = "paragraphSeparator";
+})(charCodes || (charCodes = {}));
+
+export function isDigit(code) {
+ return (
+ (code >= charCodes.digit0 && code <= charCodes.digit9) ||
+ (code >= charCodes.lowercaseA && code <= charCodes.lowercaseF) ||
+ (code >= charCodes.uppercaseA && code <= charCodes.uppercaseF)
+ );
+}
diff --git a/node_modules/sucrase/dist/esm/parser/util/identifier.js b/node_modules/sucrase/dist/esm/parser/util/identifier.js
new file mode 100644
index 0000000..33a6bb1
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/util/identifier.js
@@ -0,0 +1,34 @@
+import {charCodes} from "./charcodes";
+import {WHITESPACE_CHARS} from "./whitespace";
+
+function computeIsIdentifierChar(code) {
+ if (code < 48) return code === 36;
+ if (code < 58) return true;
+ if (code < 65) return false;
+ if (code < 91) return true;
+ if (code < 97) return code === 95;
+ if (code < 123) return true;
+ if (code < 128) return false;
+ throw new Error("Should not be called with non-ASCII char code.");
+}
+
+export const IS_IDENTIFIER_CHAR = new Uint8Array(65536);
+for (let i = 0; i < 128; i++) {
+ IS_IDENTIFIER_CHAR[i] = computeIsIdentifierChar(i) ? 1 : 0;
+}
+for (let i = 128; i < 65536; i++) {
+ IS_IDENTIFIER_CHAR[i] = 1;
+}
+// Aside from whitespace and newlines, all characters outside the ASCII space are either
+// identifier characters or invalid. Since we're not performing code validation, we can just
+// treat all invalid characters as identifier characters.
+for (const whitespaceChar of WHITESPACE_CHARS) {
+ IS_IDENTIFIER_CHAR[whitespaceChar] = 0;
+}
+IS_IDENTIFIER_CHAR[0x2028] = 0;
+IS_IDENTIFIER_CHAR[0x2029] = 0;
+
+export const IS_IDENTIFIER_START = IS_IDENTIFIER_CHAR.slice();
+for (let numChar = charCodes.digit0; numChar <= charCodes.digit9; numChar++) {
+ IS_IDENTIFIER_START[numChar] = 0;
+}
diff --git a/node_modules/sucrase/dist/esm/parser/util/whitespace.js b/node_modules/sucrase/dist/esm/parser/util/whitespace.js
new file mode 100644
index 0000000..303b8a6
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/parser/util/whitespace.js
@@ -0,0 +1,33 @@
+import {charCodes} from "./charcodes";
+
+// https://tc39.github.io/ecma262/#sec-white-space
+export const WHITESPACE_CHARS = [
+ 0x0009,
+ 0x000b,
+ 0x000c,
+ charCodes.space,
+ charCodes.nonBreakingSpace,
+ charCodes.oghamSpaceMark,
+ 0x2000, // EN QUAD
+ 0x2001, // EM QUAD
+ 0x2002, // EN SPACE
+ 0x2003, // EM SPACE
+ 0x2004, // THREE-PER-EM SPACE
+ 0x2005, // FOUR-PER-EM SPACE
+ 0x2006, // SIX-PER-EM SPACE
+ 0x2007, // FIGURE SPACE
+ 0x2008, // PUNCTUATION SPACE
+ 0x2009, // THIN SPACE
+ 0x200a, // HAIR SPACE
+ 0x202f, // NARROW NO-BREAK SPACE
+ 0x205f, // MEDIUM MATHEMATICAL SPACE
+ 0x3000, // IDEOGRAPHIC SPACE
+ 0xfeff, // ZERO WIDTH NO-BREAK SPACE
+];
+
+export const skipWhiteSpace = /(?:\s|\/\/.*|\/\*[^]*?\*\/)*/g;
+
+export const IS_WHITESPACE = new Uint8Array(65536);
+for (const char of WHITESPACE_CHARS) {
+ IS_WHITESPACE[char] = 1;
+}
diff --git a/node_modules/sucrase/dist/esm/register.js b/node_modules/sucrase/dist/esm/register.js
new file mode 100644
index 0000000..ca30cab
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/register.js
@@ -0,0 +1,88 @@
+import * as pirates from "pirates";
+
+import { transform} from "./index";
+
+
+
+
+
+
+
+
+export function addHook(
+ extension,
+ sucraseOptions,
+ hookOptions,
+) {
+ let mergedSucraseOptions = sucraseOptions;
+ const sucraseOptionsEnvJSON = process.env.SUCRASE_OPTIONS;
+ if (sucraseOptionsEnvJSON) {
+ mergedSucraseOptions = {...mergedSucraseOptions, ...JSON.parse(sucraseOptionsEnvJSON)};
+ }
+ return pirates.addHook(
+ (code, filePath) => {
+ const {code: transformedCode, sourceMap} = transform(code, {
+ ...mergedSucraseOptions,
+ sourceMapOptions: {compiledFilename: filePath},
+ filePath,
+ });
+ const mapBase64 = Buffer.from(JSON.stringify(sourceMap)).toString("base64");
+ const suffix = `//# sourceMappingURL=data:application/json;charset=utf-8;base64,${mapBase64}`;
+ return `${transformedCode}\n${suffix}`;
+ },
+ {...hookOptions, exts: [extension]},
+ );
+}
+
+export function registerJS(hookOptions) {
+ return addHook(".js", {transforms: ["imports", "flow", "jsx"]}, hookOptions);
+}
+
+export function registerJSX(hookOptions) {
+ return addHook(".jsx", {transforms: ["imports", "flow", "jsx"]}, hookOptions);
+}
+
+export function registerTS(hookOptions) {
+ return addHook(".ts", {transforms: ["imports", "typescript"]}, hookOptions);
+}
+
+export function registerTSX(hookOptions) {
+ return addHook(".tsx", {transforms: ["imports", "typescript", "jsx"]}, hookOptions);
+}
+
+export function registerTSLegacyModuleInterop(hookOptions) {
+ return addHook(
+ ".ts",
+ {
+ transforms: ["imports", "typescript"],
+ enableLegacyTypeScriptModuleInterop: true,
+ },
+ hookOptions,
+ );
+}
+
+export function registerTSXLegacyModuleInterop(hookOptions) {
+ return addHook(
+ ".tsx",
+ {
+ transforms: ["imports", "typescript", "jsx"],
+ enableLegacyTypeScriptModuleInterop: true,
+ },
+ hookOptions,
+ );
+}
+
+export function registerAll(hookOptions) {
+ const reverts = [
+ registerJS(hookOptions),
+ registerJSX(hookOptions),
+ registerTS(hookOptions),
+ registerTSX(hookOptions),
+ ];
+
+ return () => {
+ for (const fn of reverts) {
+ fn();
+ }
+ };
+}
diff --git a/node_modules/sucrase/dist/esm/transformers/CJSImportTransformer.js b/node_modules/sucrase/dist/esm/transformers/CJSImportTransformer.js
new file mode 100644
index 0000000..78cf896
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/CJSImportTransformer.js
@@ -0,0 +1,916 @@
+
+
+
+import {IdentifierRole, isDeclaration, isObjectShorthandDeclaration} from "../parser/tokenizer";
+import {ContextualKeyword} from "../parser/tokenizer/keywords";
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+import elideImportEquals from "../util/elideImportEquals";
+import getDeclarationInfo, {
+
+ EMPTY_DECLARATION_INFO,
+} from "../util/getDeclarationInfo";
+import getImportExportSpecifierInfo from "../util/getImportExportSpecifierInfo";
+import isExportFrom from "../util/isExportFrom";
+import {removeMaybeImportAttributes} from "../util/removeMaybeImportAttributes";
+import shouldElideDefaultExport from "../util/shouldElideDefaultExport";
+
+
+import Transformer from "./Transformer";
+
+/**
+ * Class for editing import statements when we are transforming to commonjs.
+ */
+export default class CJSImportTransformer extends Transformer {
+  // Class-field initializers; invoked explicitly at the end of the constructor.
+  __init() {this.hadExport = false}
+  __init2() {this.hadNamedExport = false}
+  __init3() {this.hadDefaultExport = false}
+
+
+  constructor(
+    rootTransformer,
+    tokens,
+    importProcessor,
+    nameManager,
+    helperManager,
+    reactHotLoaderTransformer,
+    enableLegacyBabel5ModuleInterop,
+    enableLegacyTypeScriptModuleInterop,
+    isTypeScriptTransformEnabled,
+    isFlowTransformEnabled,
+    preserveDynamicImport,
+    keepUnusedImports,
+  ) {
+    super();this.rootTransformer = rootTransformer;this.tokens = tokens;this.importProcessor = importProcessor;this.nameManager = nameManager;this.helperManager = helperManager;this.reactHotLoaderTransformer = reactHotLoaderTransformer;this.enableLegacyBabel5ModuleInterop = enableLegacyBabel5ModuleInterop;this.enableLegacyTypeScriptModuleInterop = enableLegacyTypeScriptModuleInterop;this.isTypeScriptTransformEnabled = isTypeScriptTransformEnabled;this.isFlowTransformEnabled = isFlowTransformEnabled;this.preserveDynamicImport = preserveDynamicImport;this.keepUnusedImports = keepUnusedImports;CJSImportTransformer.prototype.__init.call(this);CJSImportTransformer.prototype.__init2.call(this);CJSImportTransformer.prototype.__init3.call(this);;
+    // Declaration info is only needed for TypeScript's value/type elision rules.
+    this.declarationInfo = isTypeScriptTransformEnabled
+      ? getDeclarationInfo(tokens)
+      : EMPTY_DECLARATION_INFO;
+  }
+
+  /**
+   * Code prepended to the module output: mark it as an ES module once any
+   * export syntax was seen.
+   */
+  getPrefixCode() {
+    let prefix = "";
+    if (this.hadExport) {
+      prefix += 'Object.defineProperty(exports, "__esModule", {value: true});';
+    }
+    return prefix;
+  }
+
+  /**
+   * Code appended to the module output: with legacy Babel 5 interop, a module
+   * with only a default export re-exports it as module.exports itself.
+   */
+  getSuffixCode() {
+    if (this.enableLegacyBabel5ModuleInterop && this.hadDefaultExport && !this.hadNamedExport) {
+      return "\nmodule.exports = exports.default;\n";
+    }
+    return "";
+  }
+
+  /**
+   * Try to transform the token(s) at the current position. Returns true if
+   * this transformer consumed them, false to let other transformers try.
+   */
+  process() {
+    // TypeScript `import foo = require('foo');` should always just be translated to plain require.
+    if (this.tokens.matches3(tt._import, tt.name, tt.eq)) {
+      return this.processImportEquals();
+    }
+    if (this.tokens.matches1(tt._import)) {
+      this.processImport();
+      return true;
+    }
+    if (this.tokens.matches2(tt._export, tt.eq)) {
+      this.tokens.replaceToken("module.exports");
+      return true;
+    }
+    if (this.tokens.matches1(tt._export) && !this.tokens.currentToken().isType) {
+      this.hadExport = true;
+      return this.processExport();
+    }
+    if (this.tokens.matches2(tt.name, tt.postIncDec)) {
+      // Fall through to normal identifier matching if this doesn't apply.
+      if (this.processPostIncDec()) {
+        return true;
+      }
+    }
+    if (this.tokens.matches1(tt.name) || this.tokens.matches1(tt.jsxName)) {
+      return this.processIdentifier();
+    }
+    if (this.tokens.matches1(tt.eq)) {
+      return this.processAssignment();
+    }
+    if (this.tokens.matches1(tt.assign)) {
+      return this.processComplexAssignment();
+    }
+    if (this.tokens.matches1(tt.preIncDec)) {
+      return this.processPreIncDec();
+    }
+    return false;
+  }
+
+  /**
+   * Handle `import foo = require('foo');`: elide it entirely when the bound
+   * name is only used as a type, otherwise rewrite `import` to `const`.
+   */
+  processImportEquals() {
+    const importName = this.tokens.identifierNameAtIndex(this.tokens.currentIndex() + 1);
+    if (this.importProcessor.shouldAutomaticallyElideImportedName(importName)) {
+      // If this name is only used as a type, elide the whole import.
+      elideImportEquals(this.tokens);
+    } else {
+      // Otherwise, switch `import` to `const`.
+      this.tokens.replaceToken("const");
+    }
+    return true;
+  }
+
+  /**
+   * Transform this:
+   * import foo, {bar} from 'baz';
+   * into
+   * var _baz = require('baz'); var _baz2 = _interopRequireDefault(_baz);
+   *
+   * The import code was already generated in the import preprocessing step, so
+   * we just need to look it up.
+   */
+  processImport() {
+    if (this.tokens.matches2(tt._import, tt.parenL)) {
+      // Dynamic import: import(...)
+      if (this.preserveDynamicImport) {
+        // Bail out, only making progress for this one token.
+        this.tokens.copyToken();
+        return;
+      }
+      const requireWrapper = this.enableLegacyTypeScriptModuleInterop
+        ? ""
+        : `${this.helperManager.getHelperName("interopRequireWildcard")}(`;
+      this.tokens.replaceToken(`Promise.resolve().then(() => ${requireWrapper}require`);
+      const contextId = this.tokens.currentToken().contextId;
+      if (contextId == null) {
+        throw new Error("Expected context ID on dynamic import invocation.");
+      }
+      this.tokens.copyToken();
+      while (!this.tokens.matchesContextIdAndLabel(tt.parenR, contextId)) {
+        this.rootTransformer.processToken();
+      }
+      this.tokens.replaceToken(requireWrapper ? ")))" : "))");
+      return;
+    }
+
+    const shouldElideImport = this.removeImportAndDetectIfShouldElide();
+    if (shouldElideImport) {
+      this.tokens.removeToken();
+    } else {
+      const path = this.tokens.stringValue();
+      this.tokens.replaceTokenTrimmingLeftWhitespace(this.importProcessor.claimImportCode(path));
+      this.tokens.appendCode(this.importProcessor.claimImportCode(path));
+    }
+    removeMaybeImportAttributes(this.tokens);
+    if (this.tokens.matches1(tt.semi)) {
+      this.tokens.removeToken();
+    }
+  }
+
+  /**
+   * Erase this import (since any CJS output would be completely different), and
+   * return true if this import should be elided due to being a type-only
+   * import. Such imports will not be emitted at all to avoid side effects.
+   *
+   * Import elision only happens with the TypeScript or Flow transforms enabled.
+   *
+   * TODO: This function has some awkward overlap with
+   * CJSImportProcessor.pruneTypeOnlyImports , and the two should be unified.
+   * That function handles TypeScript implicit import name elision, and removes
+   * an import if all typical imported names (without `type`) are removed due
+   * to being type-only imports. This function handles Flow import removal and
+   * properly distinguishes `import 'foo'` from `import {} from 'foo'` for TS
+   * purposes.
+   *
+   * The position should end at the import string.
+   */
+  removeImportAndDetectIfShouldElide() {
+    this.tokens.removeInitialToken();
+    if (
+      this.tokens.matchesContextual(ContextualKeyword._type) &&
+      !this.tokens.matches1AtIndex(this.tokens.currentIndex() + 1, tt.comma) &&
+      !this.tokens.matchesContextualAtIndex(this.tokens.currentIndex() + 1, ContextualKeyword._from)
+    ) {
+      // This is an "import type" statement, so exit early.
+      this.removeRemainingImport();
+      return true;
+    }
+
+    if (this.tokens.matches1(tt.name) || this.tokens.matches1(tt.star)) {
+      // We have a default import or namespace import, so there must be some
+      // non-type import.
+      this.removeRemainingImport();
+      return false;
+    }
+
+    if (this.tokens.matches1(tt.string)) {
+      // This is a bare import, so we should proceed with the import.
+      return false;
+    }
+
+    let foundNonTypeImport = false;
+    let foundAnyNamedImport = false;
+    while (!this.tokens.matches1(tt.string)) {
+      // Check if any named imports are of the form "foo" or "foo as bar", with
+      // no leading "type".
+      if (
+        (!foundNonTypeImport && this.tokens.matches1(tt.braceL)) ||
+        this.tokens.matches1(tt.comma)
+      ) {
+        this.tokens.removeToken();
+        if (!this.tokens.matches1(tt.braceR)) {
+          foundAnyNamedImport = true;
+        }
+        if (
+          this.tokens.matches2(tt.name, tt.comma) ||
+          this.tokens.matches2(tt.name, tt.braceR) ||
+          this.tokens.matches4(tt.name, tt.name, tt.name, tt.comma) ||
+          this.tokens.matches4(tt.name, tt.name, tt.name, tt.braceR)
+        ) {
+          foundNonTypeImport = true;
+        }
+      }
+      this.tokens.removeToken();
+    }
+    if (this.keepUnusedImports) {
+      return false;
+    }
+    if (this.isTypeScriptTransformEnabled) {
+      return !foundNonTypeImport;
+    } else if (this.isFlowTransformEnabled) {
+      // In Flow, unlike TS, `import {} from 'foo';` preserves the import.
+      return foundAnyNamedImport && !foundNonTypeImport;
+    } else {
+      return false;
+    }
+  }
+
+  // Remove all remaining tokens of this import up to (not including) the
+  // module path string.
+  removeRemainingImport() {
+    while (!this.tokens.matches1(tt.string)) {
+      this.tokens.removeToken();
+    }
+  }
+
+  /**
+   * Rewrite a plain reference to an imported name (e.g. `foo` -> `_foo.foo`),
+   * taking care not to change the `this` binding of imported function calls.
+   */
+  processIdentifier() {
+    const token = this.tokens.currentToken();
+    if (token.shadowsGlobal) {
+      return false;
+    }
+
+    if (token.identifierRole === IdentifierRole.ObjectShorthand) {
+      return this.processObjectShorthand();
+    }
+
+    if (token.identifierRole !== IdentifierRole.Access) {
+      return false;
+    }
+    const replacement = this.importProcessor.getIdentifierReplacement(
+      this.tokens.identifierNameForToken(token),
+    );
+    if (!replacement) {
+      return false;
+    }
+    // Tolerate any number of closing parens while looking for an opening paren
+    // that indicates a function call.
+    let possibleOpenParenIndex = this.tokens.currentIndex() + 1;
+    while (
+      possibleOpenParenIndex < this.tokens.tokens.length &&
+      this.tokens.tokens[possibleOpenParenIndex].type === tt.parenR
+    ) {
+      possibleOpenParenIndex++;
+    }
+    // Avoid treating imported functions as methods of their `exports` object
+    // by using `(0, f)` when the identifier is in a paren expression. Else
+    // use `Function.prototype.call` when the identifier is a guaranteed
+    // function call. When using `call`, pass undefined as the context.
+    if (this.tokens.tokens[possibleOpenParenIndex].type === tt.parenL) {
+      if (
+        this.tokens.tokenAtRelativeIndex(1).type === tt.parenL &&
+        this.tokens.tokenAtRelativeIndex(-1).type !== tt._new
+      ) {
+        this.tokens.replaceToken(`${replacement}.call(void 0, `);
+        // Remove the old paren.
+        this.tokens.removeToken();
+        // Balance out the new paren.
+        this.rootTransformer.processBalancedCode();
+        this.tokens.copyExpectedToken(tt.parenR);
+      } else {
+        // See here: http://2ality.com/2015/12/references.html
+        this.tokens.replaceToken(`(0, ${replacement})`);
+      }
+    } else {
+      this.tokens.replaceToken(replacement);
+    }
+    return true;
+  }
+
+  /**
+   * Rewrite object shorthand like `{foo}` to `{foo: <replacement>}` when
+   * `foo` is an imported name.
+   */
+  processObjectShorthand() {
+    const identifier = this.tokens.identifierName();
+    const replacement = this.importProcessor.getIdentifierReplacement(identifier);
+    if (!replacement) {
+      return false;
+    }
+    this.tokens.replaceToken(`${identifier}: ${replacement}`);
+    return true;
+  }
+
+  /**
+   * Dispatch over the various `export ...` statement forms. Returns false for
+   * the enum forms, which are handled by other transformers.
+   */
+  processExport() {
+    if (
+      this.tokens.matches2(tt._export, tt._enum) ||
+      this.tokens.matches3(tt._export, tt._const, tt._enum)
+    ) {
+      this.hadNamedExport = true;
+      // Let the TypeScript transform handle it.
+      return false;
+    }
+    if (this.tokens.matches2(tt._export, tt._default)) {
+      if (this.tokens.matches3(tt._export, tt._default, tt._enum)) {
+        this.hadDefaultExport = true;
+        // Flow export default enums need some special handling, so handle them
+        // in that transform rather than this one.
+        return false;
+      }
+      this.processExportDefault();
+      return true;
+    } else if (this.tokens.matches2(tt._export, tt.braceL)) {
+      this.processExportBindings();
+      return true;
+    } else if (
+      this.tokens.matches2(tt._export, tt.name) &&
+      this.tokens.matchesContextualAtIndex(this.tokens.currentIndex() + 1, ContextualKeyword._type)
+    ) {
+      // export type {a};
+      // export type {a as b};
+      // export type {a} from './b';
+      // export type * from './b';
+      // export type * as ns from './b';
+      this.tokens.removeInitialToken();
+      this.tokens.removeToken();
+      if (this.tokens.matches1(tt.braceL)) {
+        while (!this.tokens.matches1(tt.braceR)) {
+          this.tokens.removeToken();
+        }
+        this.tokens.removeToken();
+      } else {
+        // *
+        this.tokens.removeToken();
+        if (this.tokens.matches1(tt._as)) {
+          // as
+          this.tokens.removeToken();
+          // ns
+          this.tokens.removeToken();
+        }
+      }
+      // Remove type re-export `... } from './T'`
+      if (
+        this.tokens.matchesContextual(ContextualKeyword._from) &&
+        this.tokens.matches1AtIndex(this.tokens.currentIndex() + 1, tt.string)
+      ) {
+        this.tokens.removeToken();
+        this.tokens.removeToken();
+        removeMaybeImportAttributes(this.tokens);
+      }
+      return true;
+    }
+    this.hadNamedExport = true;
+    if (
+      this.tokens.matches2(tt._export, tt._var) ||
+      this.tokens.matches2(tt._export, tt._let) ||
+      this.tokens.matches2(tt._export, tt._const)
+    ) {
+      this.processExportVar();
+      return true;
+    } else if (
+      this.tokens.matches2(tt._export, tt._function) ||
+      // export async function
+      this.tokens.matches3(tt._export, tt.name, tt._function)
+    ) {
+      this.processExportFunction();
+      return true;
+    } else if (
+      this.tokens.matches2(tt._export, tt._class) ||
+      this.tokens.matches3(tt._export, tt._abstract, tt._class) ||
+      this.tokens.matches2(tt._export, tt.at)
+    ) {
+      this.processExportClass();
+      return true;
+    } else if (this.tokens.matches2(tt._export, tt.star)) {
+      this.processExportStar();
+      return true;
+    } else {
+      throw new Error("Unrecognized export syntax.");
+    }
+  }
+
+  /**
+   * Process something like `a = 3;` where `a` might be an exported value:
+   * rewrite it to `a = exports.a = 3;` by appending the export binding after
+   * the `=` token.
+   */
+  processAssignment() {
+    const index = this.tokens.currentIndex();
+    const identifierToken = this.tokens.tokens[index - 1];
+    // If the LHS is a type identifier, this must be a declaration like `let a: b = c;`,
+    // with `b` as the identifier, so nothing needs to be done in that case.
+    if (identifierToken.isType || identifierToken.type !== tt.name) {
+      return false;
+    }
+    if (identifierToken.shadowsGlobal) {
+      return false;
+    }
+    if (index >= 2 && this.tokens.matches1AtIndex(index - 2, tt.dot)) {
+      return false;
+    }
+    if (index >= 2 && [tt._var, tt._let, tt._const].includes(this.tokens.tokens[index - 2].type)) {
+      // Declarations don't need an extra assignment. This doesn't avoid the
+      // assignment for comma-separated declarations, but it's still correct
+      // since the assignment is just redundant.
+      return false;
+    }
+    const assignmentSnippet = this.importProcessor.resolveExportBinding(
+      this.tokens.identifierNameForToken(identifierToken),
+    );
+    if (!assignmentSnippet) {
+      return false;
+    }
+    this.tokens.copyToken();
+    this.tokens.appendCode(` ${assignmentSnippet} =`);
+    return true;
+  }
+
+  /**
+   * Process something like `a += 3`, where `a` might be an exported value.
+   */
+  processComplexAssignment() {
+    const index = this.tokens.currentIndex();
+    const identifierToken = this.tokens.tokens[index - 1];
+    if (identifierToken.type !== tt.name) {
+      return false;
+    }
+    if (identifierToken.shadowsGlobal) {
+      return false;
+    }
+    if (index >= 2 && this.tokens.matches1AtIndex(index - 2, tt.dot)) {
+      return false;
+    }
+    const assignmentSnippet = this.importProcessor.resolveExportBinding(
+      this.tokens.identifierNameForToken(identifierToken),
+    );
+    if (!assignmentSnippet) {
+      return false;
+    }
+    this.tokens.appendCode(` = ${assignmentSnippet}`);
+    this.tokens.copyToken();
+    return true;
+  }
+
+  /**
+   * Process something like `++a`, where `a` might be an exported value.
+   */
+  processPreIncDec() {
+    const index = this.tokens.currentIndex();
+    const identifierToken = this.tokens.tokens[index + 1];
+    if (identifierToken.type !== tt.name) {
+      return false;
+    }
+    if (identifierToken.shadowsGlobal) {
+      return false;
+    }
+    // Ignore things like ++a.b and ++a[b] and ++a().b.
+    if (
+      index + 2 < this.tokens.tokens.length &&
+      (this.tokens.matches1AtIndex(index + 2, tt.dot) ||
+        this.tokens.matches1AtIndex(index + 2, tt.bracketL) ||
+        this.tokens.matches1AtIndex(index + 2, tt.parenL))
+    ) {
+      return false;
+    }
+    const identifierName = this.tokens.identifierNameForToken(identifierToken);
+    const assignmentSnippet = this.importProcessor.resolveExportBinding(identifierName);
+    if (!assignmentSnippet) {
+      return false;
+    }
+    this.tokens.appendCode(`${assignmentSnippet} = `);
+    this.tokens.copyToken();
+    return true;
+  }
+
+  /**
+   * Process something like `a++`, where `a` might be an exported value.
+   * This starts at the `a`, not at the `++`.
+   */
+  processPostIncDec() {
+    const index = this.tokens.currentIndex();
+    const identifierToken = this.tokens.tokens[index];
+    const operatorToken = this.tokens.tokens[index + 1];
+    if (identifierToken.type !== tt.name) {
+      return false;
+    }
+    if (identifierToken.shadowsGlobal) {
+      return false;
+    }
+    if (index >= 1 && this.tokens.matches1AtIndex(index - 1, tt.dot)) {
+      return false;
+    }
+    const identifierName = this.tokens.identifierNameForToken(identifierToken);
+    const assignmentSnippet = this.importProcessor.resolveExportBinding(identifierName);
+    if (!assignmentSnippet) {
+      return false;
+    }
+    const operatorCode = this.tokens.rawCodeForToken(operatorToken);
+    // We might also replace the identifier with something like exports.x, so
+    // do that replacement here as well.
+    const base = this.importProcessor.getIdentifierReplacement(identifierName) || identifierName;
+    if (operatorCode === "++") {
+      this.tokens.replaceToken(`(${base} = ${assignmentSnippet} = ${base} + 1, ${base} - 1)`);
+    } else if (operatorCode === "--") {
+      this.tokens.replaceToken(`(${base} = ${assignmentSnippet} = ${base} - 1, ${base} + 1)`);
+    } else {
+      throw new Error(`Unexpected operator: ${operatorCode}`);
+    }
+    this.tokens.removeToken();
+    return true;
+  }
+
+  /**
+   * Transform `export default ...` into an `exports.default` assignment,
+   * special-casing named functions/classes (so they remain top-level
+   * declarations) and eliding type-only default exports under TypeScript.
+   */
+  processExportDefault() {
+    let exportedRuntimeValue = true;
+    if (
+      this.tokens.matches4(tt._export, tt._default, tt._function, tt.name) ||
+      // export default async function
+      (this.tokens.matches5(tt._export, tt._default, tt.name, tt._function, tt.name) &&
+        this.tokens.matchesContextualAtIndex(
+          this.tokens.currentIndex() + 2,
+          ContextualKeyword._async,
+        ))
+    ) {
+      this.tokens.removeInitialToken();
+      this.tokens.removeToken();
+      // Named function export case: change it to a top-level function
+      // declaration followed by exports statement.
+      const name = this.processNamedFunction();
+      this.tokens.appendCode(` exports.default = ${name};`);
+    } else if (
+      this.tokens.matches4(tt._export, tt._default, tt._class, tt.name) ||
+      this.tokens.matches5(tt._export, tt._default, tt._abstract, tt._class, tt.name) ||
+      this.tokens.matches3(tt._export, tt._default, tt.at)
+    ) {
+      this.tokens.removeInitialToken();
+      this.tokens.removeToken();
+      this.copyDecorators();
+      if (this.tokens.matches1(tt._abstract)) {
+        this.tokens.removeToken();
+      }
+      const name = this.rootTransformer.processNamedClass();
+      this.tokens.appendCode(` exports.default = ${name};`);
+      // After this point, this is a plain "export default E" statement.
+    } else if (
+      shouldElideDefaultExport(
+        this.isTypeScriptTransformEnabled,
+        this.keepUnusedImports,
+        this.tokens,
+        this.declarationInfo,
+      )
+    ) {
+      // If the exported value is just an identifier and should be elided by TypeScript
+      // rules, then remove it entirely. It will always have the form `export default e`,
+      // where `e` is an identifier.
+      exportedRuntimeValue = false;
+      this.tokens.removeInitialToken();
+      this.tokens.removeToken();
+      this.tokens.removeToken();
+    } else if (this.reactHotLoaderTransformer) {
+      // We need to assign E to a variable. Change "export default E" to
+      // "let _default; exports.default = _default = E"
+      const defaultVarName = this.nameManager.claimFreeName("_default");
+      this.tokens.replaceToken(`let ${defaultVarName}; exports.`);
+      this.tokens.copyToken();
+      this.tokens.appendCode(` = ${defaultVarName} =`);
+      this.reactHotLoaderTransformer.setExtractedDefaultExportName(defaultVarName);
+    } else {
+      // Change "export default E" to "exports.default = E"
+      this.tokens.replaceToken("exports.");
+      this.tokens.copyToken();
+      this.tokens.appendCode(" =");
+    }
+    if (exportedRuntimeValue) {
+      this.hadDefaultExport = true;
+    }
+  }
+
+  /**
+   * Copy a run of decorators (`@dec`, `@ns.dec(...)`, `@(expr)`) through
+   * unchanged.
+   */
+  copyDecorators() {
+    while (this.tokens.matches1(tt.at)) {
+      this.tokens.copyToken();
+      if (this.tokens.matches1(tt.parenL)) {
+        this.tokens.copyExpectedToken(tt.parenL);
+        this.rootTransformer.processBalancedCode();
+        this.tokens.copyExpectedToken(tt.parenR);
+      } else {
+        this.tokens.copyExpectedToken(tt.name);
+        while (this.tokens.matches1(tt.dot)) {
+          this.tokens.copyExpectedToken(tt.dot);
+          this.tokens.copyExpectedToken(tt.name);
+        }
+        if (this.tokens.matches1(tt.parenL)) {
+          this.tokens.copyExpectedToken(tt.parenL);
+          this.rootTransformer.processBalancedCode();
+          this.tokens.copyExpectedToken(tt.parenR);
+        }
+      }
+    }
+  }
+
+  /**
+   * Transform a declaration like `export var`, `export let`, or `export const`.
+   */
+  processExportVar() {
+    if (this.isSimpleExportVar()) {
+      this.processSimpleExportVar();
+    } else {
+      this.processComplexExportVar();
+    }
+  }
+
+  /**
+   * Determine if the export is of the form:
+   * export var/let/const [varName] = [expr];
+   * In other words, determine if function name inference might apply.
+   */
+  isSimpleExportVar() {
+    let tokenIndex = this.tokens.currentIndex();
+    // export
+    tokenIndex++;
+    // var/let/const
+    tokenIndex++;
+    if (!this.tokens.matches1AtIndex(tokenIndex, tt.name)) {
+      return false;
+    }
+    tokenIndex++;
+    // Skip over any type annotation tokens between the name and the `=`.
+    while (tokenIndex < this.tokens.tokens.length && this.tokens.tokens[tokenIndex].isType) {
+      tokenIndex++;
+    }
+    if (!this.tokens.matches1AtIndex(tokenIndex, tt.eq)) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Transform an `export var` declaration initializing a single variable.
+   *
+   * For example, this:
+   * export const f = () => {};
+   * becomes this:
+   * const f = () => {}; exports.f = f;
+   *
+   * The variable is unused (e.g. exports.f has the true value of the export).
+   * We need to produce an assignment of this form so that the function will
+   * have an inferred name of "f", which wouldn't happen in the more general
+   * case below.
+   */
+  processSimpleExportVar() {
+    // export
+    this.tokens.removeInitialToken();
+    // var/let/const
+    this.tokens.copyToken();
+    const varName = this.tokens.identifierName();
+    // x: number -> x
+    while (!this.tokens.matches1(tt.eq)) {
+      this.rootTransformer.processToken();
+    }
+    const endIndex = this.tokens.currentToken().rhsEndIndex;
+    if (endIndex == null) {
+      throw new Error("Expected = token with an end index.");
+    }
+    while (this.tokens.currentIndex() < endIndex) {
+      this.rootTransformer.processToken();
+    }
+    this.tokens.appendCode(`; exports.${varName} = ${varName}`);
+  }
+
+  /**
+   * Transform normal declaration exports, including handling destructuring.
+   * For example, this:
+   * export const {x: [a = 2, b], c} = d;
+   * becomes this:
+   * ({x: [exports.a = 2, exports.b], c: exports.c} = d;)
+   */
+  processComplexExportVar() {
+    this.tokens.removeInitialToken();
+    this.tokens.removeToken();
+    // A leading `{` needs wrapping parens so the result parses as an
+    // expression statement rather than a block.
+    const needsParens = this.tokens.matches1(tt.braceL);
+    if (needsParens) {
+      this.tokens.appendCode("(");
+    }
+
+    let depth = 0;
+    while (true) {
+      if (
+        this.tokens.matches1(tt.braceL) ||
+        this.tokens.matches1(tt.dollarBraceL) ||
+        this.tokens.matches1(tt.bracketL)
+      ) {
+        depth++;
+        this.tokens.copyToken();
+      } else if (this.tokens.matches1(tt.braceR) || this.tokens.matches1(tt.bracketR)) {
+        depth--;
+        this.tokens.copyToken();
+      } else if (
+        depth === 0 &&
+        !this.tokens.matches1(tt.name) &&
+        !this.tokens.currentToken().isType
+      ) {
+        break;
+      } else if (this.tokens.matches1(tt.eq)) {
+        // Default values might have assignments in the RHS that we want to ignore, so skip past
+        // them.
+        const endIndex = this.tokens.currentToken().rhsEndIndex;
+        if (endIndex == null) {
+          throw new Error("Expected = token with an end index.");
+        }
+        while (this.tokens.currentIndex() < endIndex) {
+          this.rootTransformer.processToken();
+        }
+      } else {
+        const token = this.tokens.currentToken();
+        if (isDeclaration(token)) {
+          const name = this.tokens.identifierName();
+          let replacement = this.importProcessor.getIdentifierReplacement(name);
+          if (replacement === null) {
+            throw new Error(`Expected a replacement for ${name} in \`export var\` syntax.`);
+          }
+          if (isObjectShorthandDeclaration(token)) {
+            replacement = `${name}: ${replacement}`;
+          }
+          this.tokens.replaceToken(replacement);
+        } else {
+          this.rootTransformer.processToken();
+        }
+      }
+    }
+
+    if (needsParens) {
+      // Seek to the end of the RHS.
+      const endIndex = this.tokens.currentToken().rhsEndIndex;
+      if (endIndex == null) {
+        throw new Error("Expected = token with an end index.");
+      }
+      while (this.tokens.currentIndex() < endIndex) {
+        this.rootTransformer.processToken();
+      }
+      this.tokens.appendCode(")");
+    }
+  }
+
+  /**
+   * Transform this:
+   * export function foo() {}
+   * into this:
+   * function foo() {} exports.foo = foo;
+   */
+  processExportFunction() {
+    this.tokens.replaceToken("");
+    const name = this.processNamedFunction();
+    this.tokens.appendCode(` exports.${name} = ${name};`);
+  }
+
+  /**
+   * Skip past a function with a name and return that name.
+   */
+  processNamedFunction() {
+    if (this.tokens.matches1(tt._function)) {
+      this.tokens.copyToken();
+    } else if (this.tokens.matches2(tt.name, tt._function)) {
+      if (!this.tokens.matchesContextual(ContextualKeyword._async)) {
+        throw new Error("Expected async keyword in function export.");
+      }
+      this.tokens.copyToken();
+      this.tokens.copyToken();
+    }
+    if (this.tokens.matches1(tt.star)) {
+      this.tokens.copyToken();
+    }
+    if (!this.tokens.matches1(tt.name)) {
+      throw new Error("Expected identifier for exported function name.");
+    }
+    const name = this.tokens.identifierName();
+    this.tokens.copyToken();
+    // Drop any type parameter list after the function name.
+    if (this.tokens.currentToken().isType) {
+      this.tokens.removeInitialToken();
+      while (this.tokens.currentToken().isType) {
+        this.tokens.removeToken();
+      }
+    }
+    this.tokens.copyExpectedToken(tt.parenL);
+    this.rootTransformer.processBalancedCode();
+    this.tokens.copyExpectedToken(tt.parenR);
+    this.rootTransformer.processPossibleTypeRange();
+    this.tokens.copyExpectedToken(tt.braceL);
+    this.rootTransformer.processBalancedCode();
+    this.tokens.copyExpectedToken(tt.braceR);
+    return name;
+  }
+
+  /**
+   * Transform this:
+   * export class A {}
+   * into this:
+   * class A {} exports.A = A;
+   */
+  processExportClass() {
+    this.tokens.removeInitialToken();
+    this.copyDecorators();
+    if (this.tokens.matches1(tt._abstract)) {
+      this.tokens.removeToken();
+    }
+    const name = this.rootTransformer.processNamedClass();
+    this.tokens.appendCode(` exports.${name} = ${name};`);
+  }
+
+  /**
+   * Transform this:
+   * export {a, b as c};
+   * into this:
+   * exports.a = a; exports.c = b;
+   *
+   * OR
+   *
+   * Transform this:
+   * export {a, b as c} from './foo';
+   * into the pre-generated Object.defineProperty code from the ImportProcessor.
+   *
+   * For the first case, if the TypeScript transform is enabled, we need to skip
+   * exports that are only defined as types.
+   */
+  processExportBindings() {
+    this.tokens.removeInitialToken();
+    this.tokens.removeToken();
+
+    const isReExport = isExportFrom(this.tokens);
+
+    const exportStatements = [];
+    while (true) {
+      if (this.tokens.matches1(tt.braceR)) {
+        this.tokens.removeToken();
+        break;
+      }
+
+      const specifierInfo = getImportExportSpecifierInfo(this.tokens);
+
+      while (this.tokens.currentIndex() < specifierInfo.endIndex) {
+        this.tokens.removeToken();
+      }
+
+      const shouldRemoveExport =
+        specifierInfo.isType ||
+        (!isReExport && this.shouldElideExportedIdentifier(specifierInfo.leftName));
+      if (!shouldRemoveExport) {
+        const exportedName = specifierInfo.rightName;
+        if (exportedName === "default") {
+          this.hadDefaultExport = true;
+        } else {
+          this.hadNamedExport = true;
+        }
+        const localName = specifierInfo.leftName;
+        const newLocalName = this.importProcessor.getIdentifierReplacement(localName);
+        exportStatements.push(`exports.${exportedName} = ${newLocalName || localName};`);
+      }
+
+      if (this.tokens.matches1(tt.braceR)) {
+        this.tokens.removeToken();
+        break;
+      }
+      if (this.tokens.matches2(tt.comma, tt.braceR)) {
+        this.tokens.removeToken();
+        this.tokens.removeToken();
+        break;
+      } else if (this.tokens.matches1(tt.comma)) {
+        this.tokens.removeToken();
+      } else {
+        throw new Error(`Unexpected token: ${JSON.stringify(this.tokens.currentToken())}`);
+      }
+    }
+
+    if (this.tokens.matchesContextual(ContextualKeyword._from)) {
+      // This is an export...from, so throw away the normal named export code
+      // and use the Object.defineProperty code from ImportProcessor.
+      this.tokens.removeToken();
+      const path = this.tokens.stringValue();
+      this.tokens.replaceTokenTrimmingLeftWhitespace(this.importProcessor.claimImportCode(path));
+      removeMaybeImportAttributes(this.tokens);
+    } else {
+      // This is a normal named export, so use that.
+      this.tokens.appendCode(exportStatements.join(" "));
+    }
+
+    if (this.tokens.matches1(tt.semi)) {
+      this.tokens.removeToken();
+    }
+  }
+
+  /**
+   * Transform `export * from './foo';` into the pre-generated require code
+   * from the ImportProcessor.
+   */
+  processExportStar() {
+    this.tokens.removeInitialToken();
+    while (!this.tokens.matches1(tt.string)) {
+      this.tokens.removeToken();
+    }
+    const path = this.tokens.stringValue();
+    this.tokens.replaceTokenTrimmingLeftWhitespace(this.importProcessor.claimImportCode(path));
+    removeMaybeImportAttributes(this.tokens);
+    if (this.tokens.matches1(tt.semi)) {
+      this.tokens.removeToken();
+    }
+  }
+
+  /**
+   * Under the TypeScript transform (and unless keepUnusedImports is set), an
+   * exported name with no value declaration is type-only and must be elided.
+   */
+  shouldElideExportedIdentifier(name) {
+    return (
+      this.isTypeScriptTransformEnabled &&
+      !this.keepUnusedImports &&
+      !this.declarationInfo.valueDeclarations.has(name)
+    );
+  }
+}
diff --git a/node_modules/sucrase/dist/esm/transformers/ESMImportTransformer.js b/node_modules/sucrase/dist/esm/transformers/ESMImportTransformer.js
new file mode 100644
index 0000000..b45cd3e
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/ESMImportTransformer.js
@@ -0,0 +1,415 @@
+
+
+
+import {ContextualKeyword} from "../parser/tokenizer/keywords";
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+import elideImportEquals from "../util/elideImportEquals";
+import getDeclarationInfo, {
+
+ EMPTY_DECLARATION_INFO,
+} from "../util/getDeclarationInfo";
+import getImportExportSpecifierInfo from "../util/getImportExportSpecifierInfo";
+import {getNonTypeIdentifiers} from "../util/getNonTypeIdentifiers";
+import isExportFrom from "../util/isExportFrom";
+import {removeMaybeImportAttributes} from "../util/removeMaybeImportAttributes";
+import shouldElideDefaultExport from "../util/shouldElideDefaultExport";
+
+import Transformer from "./Transformer";
+
+/**
+ * Class for editing import statements when we are keeping the code as ESM. We still need to remove
+ * type-only imports in TypeScript and Flow.
+ */
+export default class ESMImportTransformer extends Transformer {
+
+  /**
+   * @param tokens TokenProcessor over the file being transformed.
+   * @param nameManager Used to claim collision-free variable names.
+   * @param helperManager Provides helper functions (e.g. a `require` shim).
+   * @param reactHotLoaderTransformer Optional; when present, default exports
+   *   are assigned to a named variable so react-hot-loader can track them.
+   * @param isTypeScriptTransformEnabled Whether TS type syntax is removed.
+   * @param isFlowTransformEnabled Whether Flow type syntax is removed.
+   * @param keepUnusedImports When true, never elide imports/exports.
+   * @param options Full Sucrase options object.
+   */
+  constructor(
+    tokens,
+    nameManager,
+    helperManager,
+    reactHotLoaderTransformer,
+    isTypeScriptTransformEnabled,
+    isFlowTransformEnabled,
+    keepUnusedImports,
+    options,
+  ) {
+    super();this.tokens = tokens;this.nameManager = nameManager;this.helperManager = helperManager;this.reactHotLoaderTransformer = reactHotLoaderTransformer;this.isTypeScriptTransformEnabled = isTypeScriptTransformEnabled;this.isFlowTransformEnabled = isFlowTransformEnabled;this.keepUnusedImports = keepUnusedImports;;
+    // Only compute the (potentially expensive) identifier analyses when the
+    // TypeScript transform may actually elide imports/exports; otherwise use
+    // cheap empty placeholders.
+    this.nonTypeIdentifiers =
+      isTypeScriptTransformEnabled && !keepUnusedImports
+        ? getNonTypeIdentifiers(tokens, options)
+        : new Set();
+    this.declarationInfo =
+      isTypeScriptTransformEnabled && !keepUnusedImports
+        ? getDeclarationInfo(tokens)
+        : EMPTY_DECLARATION_INFO;
+    this.injectCreateRequireForImportRequire = Boolean(options.injectCreateRequireForImportRequire);
+  }
+
+  /**
+   * Dispatch on the statement at the current token and transform it if it is
+   * one of the import/export forms handled here. Returns true when tokens
+   * were consumed, false to let other transformers try.
+   */
+  process() {
+    // TypeScript `import foo = require('foo');` should always just be translated to plain require.
+    if (this.tokens.matches3(tt._import, tt.name, tt.eq)) {
+      return this.processImportEquals();
+    }
+    if (
+      this.tokens.matches4(tt._import, tt.name, tt.name, tt.eq) &&
+      this.tokens.matchesContextualAtIndex(this.tokens.currentIndex() + 1, ContextualKeyword._type)
+    ) {
+      // import type T = require('T')
+      this.tokens.removeInitialToken();
+      // This construct is always exactly 8 tokens long, so remove the 7 remaining tokens.
+      for (let i = 0; i < 7; i++) {
+        this.tokens.removeToken();
+      }
+      return true;
+    }
+    if (this.tokens.matches2(tt._export, tt.eq)) {
+      this.tokens.replaceToken("module.exports");
+      return true;
+    }
+    if (
+      this.tokens.matches5(tt._export, tt._import, tt.name, tt.name, tt.eq) &&
+      this.tokens.matchesContextualAtIndex(this.tokens.currentIndex() + 2, ContextualKeyword._type)
+    ) {
+      // export import type T = require('T')
+      this.tokens.removeInitialToken();
+      // This construct is always exactly 9 tokens long, so remove the 8 remaining tokens.
+      for (let i = 0; i < 8; i++) {
+        this.tokens.removeToken();
+      }
+      return true;
+    }
+    if (this.tokens.matches1(tt._import)) {
+      return this.processImport();
+    }
+    if (this.tokens.matches2(tt._export, tt._default)) {
+      return this.processExportDefault();
+    }
+    if (this.tokens.matches2(tt._export, tt.braceL)) {
+      return this.processNamedExports();
+    }
+    if (
+      this.tokens.matches2(tt._export, tt.name) &&
+      this.tokens.matchesContextualAtIndex(this.tokens.currentIndex() + 1, ContextualKeyword._type)
+    ) {
+      // export type {a};
+      // export type {a as b};
+      // export type {a} from './b';
+      // export type * from './b';
+      // export type * as ns from './b';
+      this.tokens.removeInitialToken();
+      this.tokens.removeToken();
+      if (this.tokens.matches1(tt.braceL)) {
+        while (!this.tokens.matches1(tt.braceR)) {
+          this.tokens.removeToken();
+        }
+        this.tokens.removeToken();
+      } else {
+        // *
+        this.tokens.removeToken();
+        if (this.tokens.matches1(tt._as)) {
+          // as
+          this.tokens.removeToken();
+          // ns
+          this.tokens.removeToken();
+        }
+      }
+      // Remove type re-export `... } from './T'`
+      if (
+        this.tokens.matchesContextual(ContextualKeyword._from) &&
+        this.tokens.matches1AtIndex(this.tokens.currentIndex() + 1, tt.string)
+      ) {
+        this.tokens.removeToken();
+        this.tokens.removeToken();
+        removeMaybeImportAttributes(this.tokens);
+      }
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Handle `import Foo = require('foo');`. If the name is type-only, elide
+   * the whole statement; otherwise turn it into a `const` with either a
+   * createRequire helper (Node ESM) or a plain `require` call.
+   */
+  processImportEquals() {
+    const importName = this.tokens.identifierNameAtIndex(this.tokens.currentIndex() + 1);
+    if (this.shouldAutomaticallyElideImportedName(importName)) {
+      // If this name is only used as a type, elide the whole import.
+      elideImportEquals(this.tokens);
+    } else if (this.injectCreateRequireForImportRequire) {
+      // We're using require in an environment (Node ESM) that doesn't provide
+      // it as a global, so generate a helper to import it.
+      // import -> const
+      this.tokens.replaceToken("const");
+      // Foo
+      this.tokens.copyToken();
+      // =
+      this.tokens.copyToken();
+      // require
+      this.tokens.replaceToken(this.helperManager.getHelperName("require"));
+    } else {
+      // Otherwise, just switch `import` to `const`.
+      this.tokens.replaceToken("const");
+    }
+    return true;
+  }
+
+  /**
+   * Handle a regular `import ...` statement: remove type-only bindings and,
+   * if nothing remains, remove the entire import statement.
+   */
+  processImport() {
+    if (this.tokens.matches2(tt._import, tt.parenL)) {
+      // Dynamic imports don't need to be transformed.
+      return false;
+    }
+
+    // Take a snapshot first so we can roll back if the import turns out to be
+    // all-types and must be removed wholesale.
+    const snapshot = this.tokens.snapshot();
+    const allImportsRemoved = this.removeImportTypeBindings();
+    if (allImportsRemoved) {
+      this.tokens.restoreToSnapshot(snapshot);
+      while (!this.tokens.matches1(tt.string)) {
+        this.tokens.removeToken();
+      }
+      this.tokens.removeToken();
+      removeMaybeImportAttributes(this.tokens);
+      if (this.tokens.matches1(tt.semi)) {
+        this.tokens.removeToken();
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Remove type bindings from this import, leaving the rest of the import intact.
+   *
+   * Return true if this import was ONLY types, and thus is eligible for removal. This will bail out
+   * of the replacement operation, so we can return early here.
+   */
+  removeImportTypeBindings() {
+    this.tokens.copyExpectedToken(tt._import);
+    if (
+      this.tokens.matchesContextual(ContextualKeyword._type) &&
+      !this.tokens.matches1AtIndex(this.tokens.currentIndex() + 1, tt.comma) &&
+      !this.tokens.matchesContextualAtIndex(this.tokens.currentIndex() + 1, ContextualKeyword._from)
+    ) {
+      // This is an "import type" statement, so exit early.
+      return true;
+    }
+
+    if (this.tokens.matches1(tt.string)) {
+      // This is a bare import, so we should proceed with the import.
+      this.tokens.copyToken();
+      return false;
+    }
+
+    // Skip the "module" token in import reflection.
+    if (
+      this.tokens.matchesContextual(ContextualKeyword._module) &&
+      this.tokens.matchesContextualAtIndex(this.tokens.currentIndex() + 2, ContextualKeyword._from)
+    ) {
+      this.tokens.copyToken();
+    }
+
+    let foundNonTypeImport = false;
+    let foundAnyNamedImport = false;
+    let needsComma = false;
+
+    // Handle default import.
+    if (this.tokens.matches1(tt.name)) {
+      if (this.shouldAutomaticallyElideImportedName(this.tokens.identifierName())) {
+        this.tokens.removeToken();
+        if (this.tokens.matches1(tt.comma)) {
+          this.tokens.removeToken();
+        }
+      } else {
+        foundNonTypeImport = true;
+        this.tokens.copyToken();
+        if (this.tokens.matches1(tt.comma)) {
+          // We're in a statement like:
+          // import A, * as B from './A';
+          // or
+          // import A, {foo} from './A';
+          // where the `A` is being kept. The comma should be removed if and only
+          // if the next part of the import statement is elided, but that's hard
+          // to determine at this point in the code. Instead, always remove it
+          // and set a flag to add it back if necessary.
+          needsComma = true;
+          this.tokens.removeToken();
+        }
+      }
+    }
+
+    if (this.tokens.matches1(tt.star)) {
+      // Namespace import: `* as ns`.
+      if (this.shouldAutomaticallyElideImportedName(this.tokens.identifierNameAtRelativeIndex(2))) {
+        this.tokens.removeToken();
+        this.tokens.removeToken();
+        this.tokens.removeToken();
+      } else {
+        if (needsComma) {
+          this.tokens.appendCode(",");
+        }
+        foundNonTypeImport = true;
+        this.tokens.copyExpectedToken(tt.star);
+        this.tokens.copyExpectedToken(tt.name);
+        this.tokens.copyExpectedToken(tt.name);
+      }
+    } else if (this.tokens.matches1(tt.braceL)) {
+      // Named imports: `{a, type b, c as d}`.
+      if (needsComma) {
+        this.tokens.appendCode(",");
+      }
+      this.tokens.copyToken();
+      while (!this.tokens.matches1(tt.braceR)) {
+        foundAnyNamedImport = true;
+        const specifierInfo = getImportExportSpecifierInfo(this.tokens);
+        if (
+          specifierInfo.isType ||
+          this.shouldAutomaticallyElideImportedName(specifierInfo.rightName)
+        ) {
+          while (this.tokens.currentIndex() < specifierInfo.endIndex) {
+            this.tokens.removeToken();
+          }
+          if (this.tokens.matches1(tt.comma)) {
+            this.tokens.removeToken();
+          }
+        } else {
+          foundNonTypeImport = true;
+          while (this.tokens.currentIndex() < specifierInfo.endIndex) {
+            this.tokens.copyToken();
+          }
+          if (this.tokens.matches1(tt.comma)) {
+            this.tokens.copyToken();
+          }
+        }
+      }
+      this.tokens.copyExpectedToken(tt.braceR);
+    }
+
+    if (this.keepUnusedImports) {
+      return false;
+    }
+    if (this.isTypeScriptTransformEnabled) {
+      return !foundNonTypeImport;
+    } else if (this.isFlowTransformEnabled) {
+      // In Flow, unlike TS, `import {} from 'foo';` preserves the import.
+      return foundAnyNamedImport && !foundNonTypeImport;
+    } else {
+      return false;
+    }
+  }
+
+  /**
+   * True when an imported name was never seen used as a value, so the TS
+   * transform (with unused-import elision enabled) should drop it.
+   */
+  shouldAutomaticallyElideImportedName(name) {
+    return (
+      this.isTypeScriptTransformEnabled &&
+      !this.keepUnusedImports &&
+      !this.nonTypeIdentifiers.has(name)
+    );
+  }
+
+  /**
+   * Handle `export default ...`: elide it entirely when TS rules say the
+   * exported identifier is type-only, or extract the value into a named
+   * variable when react-hot-loader needs to track it.
+   */
+  processExportDefault() {
+    if (
+      shouldElideDefaultExport(
+        this.isTypeScriptTransformEnabled,
+        this.keepUnusedImports,
+        this.tokens,
+        this.declarationInfo,
+      )
+    ) {
+      // If the exported value is just an identifier and should be elided by TypeScript
+      // rules, then remove it entirely. It will always have the form `export default e`,
+      // where `e` is an identifier.
+      this.tokens.removeInitialToken();
+      this.tokens.removeToken();
+      this.tokens.removeToken();
+      return true;
+    }
+
+    const alreadyHasName =
+      this.tokens.matches4(tt._export, tt._default, tt._function, tt.name) ||
+      // export default async function
+      (this.tokens.matches5(tt._export, tt._default, tt.name, tt._function, tt.name) &&
+        this.tokens.matchesContextualAtIndex(
+          this.tokens.currentIndex() + 2,
+          ContextualKeyword._async,
+        )) ||
+      this.tokens.matches4(tt._export, tt._default, tt._class, tt.name) ||
+      this.tokens.matches5(tt._export, tt._default, tt._abstract, tt._class, tt.name);
+
+    if (!alreadyHasName && this.reactHotLoaderTransformer) {
+      // This is a plain "export default E" statement and we need to assign E to a variable.
+      // Change "export default E" to "let _default; export default _default = E"
+      const defaultVarName = this.nameManager.claimFreeName("_default");
+      this.tokens.replaceToken(`let ${defaultVarName}; export`);
+      this.tokens.copyToken();
+      this.tokens.appendCode(` ${defaultVarName} =`);
+      this.reactHotLoaderTransformer.setExtractedDefaultExportName(defaultVarName);
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Handle a statement with one of these forms:
+   *   export {a, type b};
+   *   export {c, type d} from 'foo';
+   *
+   * In both cases, any explicit type exports should be removed. In the first
+   * case, we also need to handle implicit export elision for names declared as
+   * types. In the second case, we must NOT do implicit named export elision,
+   * but we must remove the runtime import if all exports are type exports.
+   */
+  processNamedExports() {
+    if (!this.isTypeScriptTransformEnabled) {
+      return false;
+    }
+    this.tokens.copyExpectedToken(tt._export);
+    this.tokens.copyExpectedToken(tt.braceL);
+
+    const isReExport = isExportFrom(this.tokens);
+    let foundNonTypeExport = false;
+    while (!this.tokens.matches1(tt.braceR)) {
+      const specifierInfo = getImportExportSpecifierInfo(this.tokens);
+      if (
+        specifierInfo.isType ||
+        (!isReExport && this.shouldElideExportedName(specifierInfo.leftName))
+      ) {
+        // Type export, so remove all tokens, including any comma.
+        while (this.tokens.currentIndex() < specifierInfo.endIndex) {
+          this.tokens.removeToken();
+        }
+        if (this.tokens.matches1(tt.comma)) {
+          this.tokens.removeToken();
+        }
+      } else {
+        // Non-type export, so copy all tokens, including any comma.
+        foundNonTypeExport = true;
+        while (this.tokens.currentIndex() < specifierInfo.endIndex) {
+          this.tokens.copyToken();
+        }
+        if (this.tokens.matches1(tt.comma)) {
+          this.tokens.copyToken();
+        }
+      }
+    }
+    this.tokens.copyExpectedToken(tt.braceR);
+
+    if (!this.keepUnusedImports && isReExport && !foundNonTypeExport) {
+      // This is a type-only re-export, so skip evaluating the other module. Technically this
+      // leaves the statement as `export {}`, but that's ok since that's a no-op.
+      this.tokens.removeToken();
+      this.tokens.removeToken();
+      removeMaybeImportAttributes(this.tokens);
+    }
+
+    return true;
+  }
+
+  /**
+   * ESM elides all imports with the rule that we only elide if we see that it's
+   * a type and never see it as a value. This is in contrast to CJS, which
+   * elides imports that are completely unknown.
+   */
+  shouldElideExportedName(name) {
+    return (
+      this.isTypeScriptTransformEnabled &&
+      !this.keepUnusedImports &&
+      this.declarationInfo.typeDeclarations.has(name) &&
+      !this.declarationInfo.valueDeclarations.has(name)
+    );
+  }
+}
diff --git a/node_modules/sucrase/dist/esm/transformers/FlowTransformer.js b/node_modules/sucrase/dist/esm/transformers/FlowTransformer.js
new file mode 100644
index 0000000..7df0aca
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/FlowTransformer.js
@@ -0,0 +1,182 @@
+import {ContextualKeyword} from "../parser/tokenizer/keywords";
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
+import Transformer from "./Transformer";
+
+export default class FlowTransformer extends Transformer {
+  /**
+   * @param rootTransformer Used to delegate arrow-param / type-range handling.
+   * @param tokens TokenProcessor over the file being transformed.
+   * @param isImportsTransformEnabled True when emitting CJS (`exports.X`),
+   *   false when keeping ESM (`export ...`).
+   */
+  constructor(
+    rootTransformer,
+    tokens,
+    isImportsTransformEnabled,
+  ) {
+    super();this.rootTransformer = rootTransformer;this.tokens = tokens;this.isImportsTransformEnabled = isImportsTransformEnabled;;
+  }
+
+  /**
+   * Handle Flow-specific syntax at the current token: delegated type-range
+   * removal, plus the three `enum` declaration forms. Returns true when
+   * tokens were consumed.
+   */
+  process() {
+    if (
+      this.rootTransformer.processPossibleArrowParamEnd() ||
+      this.rootTransformer.processPossibleAsyncArrowWithTypeParams() ||
+      this.rootTransformer.processPossibleTypeRange()
+    ) {
+      return true;
+    }
+    if (this.tokens.matches1(tt._enum)) {
+      this.processEnum();
+      return true;
+    }
+    if (this.tokens.matches2(tt._export, tt._enum)) {
+      this.processNamedExportEnum();
+      return true;
+    }
+    if (this.tokens.matches3(tt._export, tt._default, tt._enum)) {
+      this.processDefaultExportEnum();
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Handle a declaration like:
+   *   export enum E ...
+   *
+   * With this imports transform, this becomes:
+   *   const E = [[enum]]; exports.E = E;
+   *
+   * otherwise, it becomes:
+   *   export const E = [[enum]];
+   */
+  processNamedExportEnum() {
+    if (this.isImportsTransformEnabled) {
+      // export
+      this.tokens.removeInitialToken();
+      const enumName = this.tokens.identifierNameAtRelativeIndex(1);
+      this.processEnum();
+      this.tokens.appendCode(` exports.${enumName} = ${enumName};`);
+    } else {
+      this.tokens.copyToken();
+      this.processEnum();
+    }
+  }
+
+  /**
+   * Handle a declaration like:
+   *   export default enum E
+   *
+   * With the imports transform, this becomes:
+   *   const E = [[enum]]; exports.default = E;
+   *
+   * otherwise, it becomes:
+   *   const E = [[enum]]; export default E;
+   */
+  processDefaultExportEnum() {
+    // export
+    this.tokens.removeInitialToken();
+    // default
+    this.tokens.removeToken();
+    const enumName = this.tokens.identifierNameAtRelativeIndex(1);
+    this.processEnum();
+    if (this.isImportsTransformEnabled) {
+      this.tokens.appendCode(` exports.default = ${enumName};`);
+    } else {
+      this.tokens.appendCode(` export default ${enumName};`);
+    }
+  }
+
+  /**
+   * Transpile flow enums to invoke the "flow-enums-runtime" library.
+   *
+   * Currently, the transpiled code always uses `require("flow-enums-runtime")`,
+   * but if future flexibility is needed, we could expose a config option for
+   * this string (similar to configurable JSX). Even when targeting ESM, the
+   * default behavior of babel-plugin-transform-flow-enums is to use require
+   * rather than injecting an import.
+   *
+   * Flow enums are quite a bit simpler than TS enums and have some convenient
+   * constraints:
+   * - Element initializers must be either always present or always absent. That
+   *   means that we can use fixed lookahead on the first element (if any) and
+   *   assume that all elements are like that.
+   * - The right-hand side of an element initializer must be a literal value,
+   *   not a complex expression and not referencing other elements. That means
+   *   we can simply copy a single token.
+   *
+   * Enums can be broken up into three basic cases:
+   *
+   * Mirrored enums:
+   *   enum E {A, B}
+   * ->
+   *   const E = require("flow-enums-runtime").Mirrored(["A", "B"]);
+   *
+   * Initializer enums:
+   *   enum E {A = 1, B = 2}
+   * ->
+   *   const E = require("flow-enums-runtime")({A: 1, B: 2});
+   *
+   * Symbol enums:
+   *   enum E of symbol {A, B}
+   * ->
+   *   const E = require("flow-enums-runtime")({A: Symbol("A"), B: Symbol("B")});
+   *
+   * We can statically detect which of the three cases this is by looking at the
+   * "of" declaration (if any) and seeing if the first element has an initializer.
+   * Since the other transform details are so similar between the three cases, we
+   * use a single implementation and vary the transform within processEnumElement
+   * based on case.
+   */
+  processEnum() {
+    // enum E -> const E
+    this.tokens.replaceToken("const");
+    this.tokens.copyExpectedToken(tt.name);
+
+    let isSymbolEnum = false;
+    if (this.tokens.matchesContextual(ContextualKeyword._of)) {
+      this.tokens.removeToken();
+      isSymbolEnum = this.tokens.matchesContextual(ContextualKeyword._symbol);
+      this.tokens.removeToken();
+    }
+    const hasInitializers = this.tokens.matches3(tt.braceL, tt.name, tt.eq);
+    this.tokens.appendCode(' = require("flow-enums-runtime")');
+
+    const isMirrored = !isSymbolEnum && !hasInitializers;
+    this.tokens.replaceTokenTrimmingLeftWhitespace(isMirrored ? ".Mirrored([" : "({");
+
+    while (!this.tokens.matches1(tt.braceR)) {
+      // ... is allowed at the end and has no runtime behavior.
+      if (this.tokens.matches1(tt.ellipsis)) {
+        this.tokens.removeToken();
+        break;
+      }
+      this.processEnumElement(isSymbolEnum, hasInitializers);
+      if (this.tokens.matches1(tt.comma)) {
+        this.tokens.copyToken();
+      }
+    }
+
+    this.tokens.replaceToken(isMirrored ? "]);" : "});");
+  }
+
+  /**
+   * Process an individual enum element, producing either an array element or an
+   * object element based on what type of enum this is.
+   */
+  processEnumElement(isSymbolEnum, hasInitializers) {
+    if (isSymbolEnum) {
+      // Symbol enums never have initializers and are expanded to object elements.
+      // A, -> A: Symbol("A"),
+      const elementName = this.tokens.identifierName();
+      this.tokens.copyToken();
+      this.tokens.appendCode(`: Symbol("${elementName}")`);
+    } else if (hasInitializers) {
+      // Initializers are expanded to object elements.
+      // A = 1, -> A: 1,
+      this.tokens.copyToken();
+      this.tokens.replaceTokenTrimmingLeftWhitespace(":");
+      this.tokens.copyToken();
+    } else {
+      // Enum elements without initializers become string literal array elements.
+      // A, -> "A",
+      this.tokens.replaceToken(`"${this.tokens.identifierName()}"`);
+    }
+  }
+}
diff --git a/node_modules/sucrase/dist/esm/transformers/JSXTransformer.js b/node_modules/sucrase/dist/esm/transformers/JSXTransformer.js
new file mode 100644
index 0000000..e5f5ae5
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/JSXTransformer.js
@@ -0,0 +1,733 @@
+
+
+
+import XHTMLEntities from "../parser/plugins/jsx/xhtml";
+import {JSXRole} from "../parser/tokenizer";
+import {TokenType as tt} from "../parser/tokenizer/types";
+import {charCodes} from "../parser/util/charcodes";
+
+import getJSXPragmaInfo, {} from "../util/getJSXPragmaInfo";
+
+import Transformer from "./Transformer";
+
+export default class JSXTransformer extends Transformer {
+
+
+
+
+ // State for calculating the line number of each JSX tag in development.
+ __init() {this.lastLineNumber = 1}
+ __init2() {this.lastIndex = 0}
+
+ // In development, variable name holding the name of the current file.
+ __init3() {this.filenameVarName = null}
+ // Mapping of claimed names for imports in the automatic transform, e,g.
+ // {jsx: "_jsx"}. This determines which imports to generate in the prefix.
+ __init4() {this.esmAutomaticImportNameResolutions = {}}
+ // When automatically adding imports in CJS mode, we store the variable name
+ // holding the imported CJS module so we can require it in the prefix.
+ __init5() {this.cjsAutomaticModuleNameResolutions = {}}
+
+ constructor(
+ rootTransformer,
+ tokens,
+ importProcessor,
+ nameManager,
+ options,
+ ) {
+ super();this.rootTransformer = rootTransformer;this.tokens = tokens;this.importProcessor = importProcessor;this.nameManager = nameManager;this.options = options;JSXTransformer.prototype.__init.call(this);JSXTransformer.prototype.__init2.call(this);JSXTransformer.prototype.__init3.call(this);JSXTransformer.prototype.__init4.call(this);JSXTransformer.prototype.__init5.call(this);;
+ this.jsxPragmaInfo = getJSXPragmaInfo(options);
+ this.isAutomaticRuntime = options.jsxRuntime === "automatic";
+ this.jsxImportSource = options.jsxImportSource || "react";
+ }
+
+ process() {
+ if (this.tokens.matches1(tt.jsxTagStart)) {
+ this.processJSXTag();
+ return true;
+ }
+ return false;
+ }
+
+ getPrefixCode() {
+ let prefix = "";
+ if (this.filenameVarName) {
+ prefix += `const ${this.filenameVarName} = ${JSON.stringify(this.options.filePath || "")};`;
+ }
+ if (this.isAutomaticRuntime) {
+ if (this.importProcessor) {
+ // CJS mode: emit require statements for all modules that were referenced.
+ for (const [path, resolvedName] of Object.entries(this.cjsAutomaticModuleNameResolutions)) {
+ prefix += `var ${resolvedName} = require("${path}");`;
+ }
+ } else {
+ // ESM mode: consolidate and emit import statements for referenced names.
+ const {createElement: createElementResolution, ...otherResolutions} =
+ this.esmAutomaticImportNameResolutions;
+ if (createElementResolution) {
+ prefix += `import {createElement as ${createElementResolution}} from "${this.jsxImportSource}";`;
+ }
+ const importSpecifiers = Object.entries(otherResolutions)
+ .map(([name, resolvedName]) => `${name} as ${resolvedName}`)
+ .join(", ");
+ if (importSpecifiers) {
+ const importPath =
+ this.jsxImportSource + (this.options.production ? "/jsx-runtime" : "/jsx-dev-runtime");
+ prefix += `import {${importSpecifiers}} from "${importPath}";`;
+ }
+ }
+ }
+ return prefix;
+ }
+
+ processJSXTag() {
+ const {jsxRole, start} = this.tokens.currentToken();
+ // Calculate line number information at the very start (if in development
+ // mode) so that the information is guaranteed to be queried in token order.
+ const elementLocationCode = this.options.production ? null : this.getElementLocationCode(start);
+ if (this.isAutomaticRuntime && jsxRole !== JSXRole.KeyAfterPropSpread) {
+ this.transformTagToJSXFunc(elementLocationCode, jsxRole);
+ } else {
+ this.transformTagToCreateElement(elementLocationCode);
+ }
+ }
+
+ getElementLocationCode(firstTokenStart) {
+ const lineNumber = this.getLineNumberForIndex(firstTokenStart);
+ return `lineNumber: ${lineNumber}`;
+ }
+
+ /**
+ * Get the line number for this source position. This is calculated lazily and
+ * must be called in increasing order by index.
+ */
+ getLineNumberForIndex(index) {
+ const code = this.tokens.code;
+ while (this.lastIndex < index && this.lastIndex < code.length) {
+ if (code[this.lastIndex] === "\n") {
+ this.lastLineNumber++;
+ }
+ this.lastIndex++;
+ }
+ return this.lastLineNumber;
+ }
+
+ /**
+ * Convert the current JSX element to a call to jsx, jsxs, or jsxDEV. This is
+ * the primary transformation for the automatic transform.
+ *
+ * Example:
+ * <div a={1} key={2}>Hello{x}</div>
+ * becomes
+ * jsxs('div', {a: 1, children: ["Hello", x]}, 2)
+ */
+ transformTagToJSXFunc(elementLocationCode, jsxRole) {
+ const isStatic = jsxRole === JSXRole.StaticChildren;
+ // First tag is always jsxTagStart.
+ this.tokens.replaceToken(this.getJSXFuncInvocationCode(isStatic));
+
+ let keyCode = null;
+ if (this.tokens.matches1(tt.jsxTagEnd)) {
+ // Fragment syntax.
+ this.tokens.replaceToken(`${this.getFragmentCode()}, {`);
+ this.processAutomaticChildrenAndEndProps(jsxRole);
+ } else {
+ // Normal open tag or self-closing tag.
+ this.processTagIntro();
+ this.tokens.appendCode(", {");
+ keyCode = this.processProps(true);
+
+ if (this.tokens.matches2(tt.slash, tt.jsxTagEnd)) {
+ // Self-closing tag, no children to add, so close the props.
+ this.tokens.appendCode("}");
+ } else if (this.tokens.matches1(tt.jsxTagEnd)) {
+ // Tag with children.
+ this.tokens.removeToken();
+ this.processAutomaticChildrenAndEndProps(jsxRole);
+ } else {
+ throw new Error("Expected either /> or > at the end of the tag.");
+ }
+ // If a key was present, move it to its own arg. Note that moving code
+ // like this will cause line numbers to get out of sync within the JSX
+ // element if the key expression has a newline in it. This is unfortunate,
+ // but hopefully should be rare.
+ if (keyCode) {
+ this.tokens.appendCode(`, ${keyCode}`);
+ }
+ }
+ if (!this.options.production) {
+ // If the key wasn't already added, add it now so we can correctly set
+ // positional args for jsxDEV.
+ if (keyCode === null) {
+ this.tokens.appendCode(", void 0");
+ }
+ this.tokens.appendCode(`, ${isStatic}, ${this.getDevSource(elementLocationCode)}, this`);
+ }
+ // We're at the close-tag or the end of a self-closing tag, so remove
+ // everything else and close the function call.
+ this.tokens.removeInitialToken();
+ while (!this.tokens.matches1(tt.jsxTagEnd)) {
+ this.tokens.removeToken();
+ }
+ this.tokens.replaceToken(")");
+ }
+
+ /**
+ * Convert the current JSX element to a createElement call. In the classic
+ * runtime, this is the only case. In the automatic runtime, this is called
+ * as a fallback in some situations.
+ *
+ * Example:
+ * <div a={1} key={2}>Hello{x}</div>
+ * becomes
+ * React.createElement('div', {a: 1, key: 2}, "Hello", x)
+ */
+ transformTagToCreateElement(elementLocationCode) {
+ // First tag is always jsxTagStart.
+ this.tokens.replaceToken(this.getCreateElementInvocationCode());
+
+ if (this.tokens.matches1(tt.jsxTagEnd)) {
+ // Fragment syntax.
+ this.tokens.replaceToken(`${this.getFragmentCode()}, null`);
+ this.processChildren(true);
+ } else {
+ // Normal open tag or self-closing tag.
+ this.processTagIntro();
+ this.processPropsObjectWithDevInfo(elementLocationCode);
+
+ if (this.tokens.matches2(tt.slash, tt.jsxTagEnd)) {
+ // Self-closing tag; no children to process.
+ } else if (this.tokens.matches1(tt.jsxTagEnd)) {
+ // Tag with children and a close-tag; process the children as args.
+ this.tokens.removeToken();
+ this.processChildren(true);
+ } else {
+ throw new Error("Expected either /> or > at the end of the tag.");
+ }
+ }
+ // We're at the close-tag or the end of a self-closing tag, so remove
+ // everything else and close the function call.
+ this.tokens.removeInitialToken();
+ while (!this.tokens.matches1(tt.jsxTagEnd)) {
+ this.tokens.removeToken();
+ }
+ this.tokens.replaceToken(")");
+ }
+
+ /**
+ * Get the code for the relevant function for this context: jsx, jsxs,
+ * or jsxDEV. The following open-paren is included as well.
+ *
+ * These functions are only used for the automatic runtime, so they are always
+ * auto-imported, but the auto-import will be either CJS or ESM based on the
+ * target module format.
+ */
+ getJSXFuncInvocationCode(isStatic) {
+ if (this.options.production) {
+ if (isStatic) {
+ return this.claimAutoImportedFuncInvocation("jsxs", "/jsx-runtime");
+ } else {
+ return this.claimAutoImportedFuncInvocation("jsx", "/jsx-runtime");
+ }
+ } else {
+ return this.claimAutoImportedFuncInvocation("jsxDEV", "/jsx-dev-runtime");
+ }
+ }
+
+ /**
+ * Return the code to use for the createElement function, e.g.
+ * `React.createElement`, including the following open-paren.
+ *
+ * This is the main function to use for the classic runtime. For the
+ * automatic runtime, this function is used as a fallback function to
+ * preserve behavior when there is a prop spread followed by an explicit
+ * key. In that automatic runtime case, the function should be automatically
+ * imported.
+ */
+ getCreateElementInvocationCode() {
+ if (this.isAutomaticRuntime) {
+ return this.claimAutoImportedFuncInvocation("createElement", "");
+ } else {
+ const {jsxPragmaInfo} = this;
+ const resolvedPragmaBaseName = this.importProcessor
+ ? this.importProcessor.getIdentifierReplacement(jsxPragmaInfo.base) || jsxPragmaInfo.base
+ : jsxPragmaInfo.base;
+ return `${resolvedPragmaBaseName}${jsxPragmaInfo.suffix}(`;
+ }
+ }
+
+ /**
+ * Return the code to use as the component when compiling a shorthand
+ * fragment, e.g. `React.Fragment`.
+ *
+ * This may be called from either the classic or automatic runtime, and
+ * the value should be auto-imported for the automatic runtime.
+ */
+ getFragmentCode() {
+ if (this.isAutomaticRuntime) {
+ return this.claimAutoImportedName(
+ "Fragment",
+ this.options.production ? "/jsx-runtime" : "/jsx-dev-runtime",
+ );
+ } else {
+ const {jsxPragmaInfo} = this;
+ const resolvedFragmentPragmaBaseName = this.importProcessor
+ ? this.importProcessor.getIdentifierReplacement(jsxPragmaInfo.fragmentBase) ||
+ jsxPragmaInfo.fragmentBase
+ : jsxPragmaInfo.fragmentBase;
+ return resolvedFragmentPragmaBaseName + jsxPragmaInfo.fragmentSuffix;
+ }
+ }
+
+ /**
+ * Return code that invokes the given function.
+ *
+ * When the imports transform is enabled, use the CJSImportTransformer
+ * strategy of using `.call(void 0, ...` to avoid passing a `this` value in a
+ * situation that would otherwise look like a method call.
+ */
+ claimAutoImportedFuncInvocation(funcName, importPathSuffix) {
+ const funcCode = this.claimAutoImportedName(funcName, importPathSuffix);
+ if (this.importProcessor) {
+ return `${funcCode}.call(void 0, `;
+ } else {
+ return `${funcCode}(`;
+ }
+ }
+
+ claimAutoImportedName(funcName, importPathSuffix) {
+ if (this.importProcessor) {
+ // CJS mode: claim a name for the module and mark it for import.
+ const path = this.jsxImportSource + importPathSuffix;
+ if (!this.cjsAutomaticModuleNameResolutions[path]) {
+ this.cjsAutomaticModuleNameResolutions[path] =
+ this.importProcessor.getFreeIdentifierForPath(path);
+ }
+ return `${this.cjsAutomaticModuleNameResolutions[path]}.${funcName}`;
+ } else {
+ // ESM mode: claim a name for this function and add it to the names that
+ // should be auto-imported when the prefix is generated.
+ if (!this.esmAutomaticImportNameResolutions[funcName]) {
+ this.esmAutomaticImportNameResolutions[funcName] = this.nameManager.claimFreeName(
+ `_${funcName}`,
+ );
+ }
+ return this.esmAutomaticImportNameResolutions[funcName];
+ }
+ }
+
  /**
   * Process the first part of a tag, before any props.
   */
  processTagIntro() {
    // Walk forward until we see one of these patterns:
    // jsxName to start the first prop, preceded by another jsxName to end the tag name.
    // jsxName to start the first prop, preceded by greaterThan to end the type argument.
    // [open brace] to start the first prop.
    // [jsxTagEnd] to end the open-tag.
    // [slash, jsxTagEnd] to end the self-closing tag.
    let introEnd = this.tokens.currentIndex() + 1;
    while (
      // Skip over type tokens (e.g. TS type arguments) unconditionally.
      this.tokens.tokens[introEnd].isType ||
      (!this.tokens.matches2AtIndex(introEnd - 1, tt.jsxName, tt.jsxName) &&
        !this.tokens.matches2AtIndex(introEnd - 1, tt.greaterThan, tt.jsxName) &&
        !this.tokens.matches1AtIndex(introEnd, tt.braceL) &&
        !this.tokens.matches1AtIndex(introEnd, tt.jsxTagEnd) &&
        !this.tokens.matches2AtIndex(introEnd, tt.slash, tt.jsxTagEnd))
    ) {
      introEnd++;
    }
    if (introEnd === this.tokens.currentIndex() + 1) {
      // Single-token tag name: lower-case names are intrinsic DOM elements,
      // so emit them as string literals instead of identifiers.
      const tagName = this.tokens.identifierName();
      if (startsWithLowerCase(tagName)) {
        this.tokens.replaceToken(`'${tagName}'`);
      }
    }
    // Emit the remaining intro tokens (member-expression tag names, type
    // arguments, etc.) through the normal transform pipeline.
    while (this.tokens.currentIndex() < introEnd) {
      this.rootTransformer.processToken();
    }
  }
+
  /**
   * Starting at the beginning of the props, add the props argument to
   * React.createElement, including the comma before it.
   */
  processPropsObjectWithDevInfo(elementLocationCode) {
    // In non-production mode, attach __self/__source debug info to the props.
    const devProps = this.options.production
      ? ""
      : `__self: this, __source: ${this.getDevSource(elementLocationCode)}`;
    if (!this.tokens.matches1(tt.jsxName) && !this.tokens.matches1(tt.braceL)) {
      // No props at all: emit either just the dev-info object or `null`.
      if (devProps) {
        this.tokens.appendCode(`, {${devProps}}`);
      } else {
        this.tokens.appendCode(`, null`);
      }
      return;
    }
    // There is at least one prop (named prop or spread); wrap them in an
    // object literal, optionally with the dev info appended at the end.
    this.tokens.appendCode(`, {`);
    this.processProps(false);
    if (devProps) {
      this.tokens.appendCode(` ${devProps}}`);
    } else {
      this.tokens.appendCode("}");
    }
  }
+
  /**
   * Transform the core part of the props, assuming that a { has already been
   * inserted before us and that a } will be inserted after us.
   *
   * If extractKeyCode is true (i.e. when using any jsx... function), any prop
   * named "key" has its code captured and returned rather than being emitted to
   * the output code. This shifts line numbers, and emitting the code later will
   * correct line numbers again. If no key is found or if extractKeyCode is
   * false, this function returns null.
   */
  processProps(extractKeyCode) {
    let keyCode = null;
    while (true) {
      if (this.tokens.matches2(tt.jsxName, tt.eq)) {
        // This is a regular key={value} or key="value" prop.
        const propName = this.tokens.identifierName();
        if (extractKeyCode && propName === "key") {
          if (keyCode !== null) {
            // The props list has multiple keys. Different implementations are
            // inconsistent about what to do here: as of this writing, Babel and
            // swc keep the *last* key and completely remove the rest, while
            // TypeScript uses the *first* key and leaves the others as regular
            // props. The React team collaborated with Babel on the
            // implementation of this behavior, so presumably the Babel behavior
            // is the one to use.
            // Since we won't ever be emitting the previous key code, we need to
            // at least emit its newlines here so that the line numbers match up
            // in the long run.
            this.tokens.appendCode(keyCode.replace(/[^\n]/g, ""));
          }
          // key
          this.tokens.removeToken();
          // =
          this.tokens.removeToken();
          // Capture (and remove) everything the value emitted so it can be
          // re-emitted later as the dedicated key argument.
          const snapshot = this.tokens.snapshot();
          this.processPropValue();
          keyCode = this.tokens.dangerouslyGetAndRemoveCodeSinceSnapshot(snapshot);
          // Don't add a comma
          continue;
        } else {
          this.processPropName(propName);
          // Replace the `=` token with `: ` to form an object property.
          this.tokens.replaceToken(": ");
          this.processPropValue();
        }
      } else if (this.tokens.matches1(tt.jsxName)) {
        // This is a shorthand prop like <input disabled />.
        const propName = this.tokens.identifierName();
        this.processPropName(propName);
        this.tokens.appendCode(": true");
      } else if (this.tokens.matches1(tt.braceL)) {
        // This is prop spread, like <div {...getProps()}>, which we can pass
        // through fairly directly as an object spread.
        this.tokens.replaceToken("");
        this.rootTransformer.processBalancedCode();
        this.tokens.replaceToken("");
      } else {
        break;
      }
      this.tokens.appendCode(",");
    }
    return keyCode;
  }
+
+ processPropName(propName) {
+ if (propName.includes("-")) {
+ this.tokens.replaceToken(`'${propName}'`);
+ } else {
+ this.tokens.copyToken();
+ }
+ }
+
+ processPropValue() {
+ if (this.tokens.matches1(tt.braceL)) {
+ this.tokens.replaceToken("");
+ this.rootTransformer.processBalancedCode();
+ this.tokens.replaceToken("");
+ } else if (this.tokens.matches1(tt.jsxTagStart)) {
+ this.processJSXTag();
+ } else {
+ this.processStringPropValue();
+ }
+ }
+
+ processStringPropValue() {
+ const token = this.tokens.currentToken();
+ const valueCode = this.tokens.code.slice(token.start + 1, token.end - 1);
+ const replacementCode = formatJSXTextReplacement(valueCode);
+ const literalCode = formatJSXStringValueLiteral(valueCode);
+ this.tokens.replaceToken(literalCode + replacementCode);
+ }
+
+ /**
+ * Starting in the middle of the props object literal, produce an additional
+ * prop for the children and close the object literal.
+ */
+ processAutomaticChildrenAndEndProps(jsxRole) {
+ if (jsxRole === JSXRole.StaticChildren) {
+ this.tokens.appendCode(" children: [");
+ this.processChildren(false);
+ this.tokens.appendCode("]}");
+ } else {
+ // The parser information tells us whether we will see a real child or if
+ // all remaining children (if any) will resolve to empty. If there are no
+ // non-empty children, don't emit a children prop at all, but still
+ // process children so that we properly transform the code into nothing.
+ if (jsxRole === JSXRole.OneChild) {
+ this.tokens.appendCode(" children: ");
+ }
+ this.processChildren(false);
+ this.tokens.appendCode("}");
+ }
+ }
+
  /**
   * Transform children into a comma-separated list, which will be either
   * arguments to createElement or array elements of a children prop.
   */
  processChildren(needsInitialComma) {
    let needsComma = needsInitialComma;
    while (true) {
      if (this.tokens.matches2(tt.jsxTagStart, tt.slash)) {
        // Closing tag, so no more children.
        return;
      }
      let didEmitElement = false;
      if (this.tokens.matches1(tt.braceL)) {
        if (this.tokens.matches2(tt.braceL, tt.braceR)) {
          // Empty interpolations and comment-only interpolations are allowed
          // and don't create an extra child arg.
          this.tokens.replaceToken("");
          this.tokens.replaceToken("");
        } else {
          // Interpolated expression.
          this.tokens.replaceToken(needsComma ? ", " : "");
          this.rootTransformer.processBalancedCode();
          this.tokens.replaceToken("");
          didEmitElement = true;
        }
      } else if (this.tokens.matches1(tt.jsxTagStart)) {
        // Child JSX element
        this.tokens.appendCode(needsComma ? ", " : "");
        this.processJSXTag();
        didEmitElement = true;
      } else if (this.tokens.matches1(tt.jsxText) || this.tokens.matches1(tt.jsxEmptyText)) {
        // Text may collapse to nothing; only count it as an emitted element
        // (and thus require a comma) if a string literal was produced.
        didEmitElement = this.processChildTextElement(needsComma);
      } else {
        throw new Error("Unexpected token when processing JSX children.");
      }
      if (didEmitElement) {
        needsComma = true;
      }
    }
  }
+
+ /**
+ * Turn a JSX text element into a string literal, or nothing at all if the JSX
+ * text resolves to the empty string.
+ *
+ * Returns true if a string literal is emitted, false otherwise.
+ */
+ processChildTextElement(needsComma) {
+ const token = this.tokens.currentToken();
+ const valueCode = this.tokens.code.slice(token.start, token.end);
+ const replacementCode = formatJSXTextReplacement(valueCode);
+ const literalCode = formatJSXTextLiteral(valueCode);
+ if (literalCode === '""') {
+ this.tokens.replaceToken(replacementCode);
+ return false;
+ } else {
+ this.tokens.replaceToken(`${needsComma ? ", " : ""}${literalCode}${replacementCode}`);
+ return true;
+ }
+ }
+
+ getDevSource(elementLocationCode) {
+ return `{fileName: ${this.getFilenameVarName()}, ${elementLocationCode}}`;
+ }
+
+ getFilenameVarName() {
+ if (!this.filenameVarName) {
+ this.filenameVarName = this.nameManager.claimFreeName("_jsxFileName");
+ }
+ return this.filenameVarName;
+ }
+}
+
/**
 * Spec for identifiers: https://tc39.github.io/ecma262/#prod-IdentifierStart.
 *
 * Really only treat anything starting with a-z as tag names. `_`, `$`, `é`
 * should be treated as component names
 */
export function startsWithLowerCase(s) {
  const code = s.charCodeAt(0);
  // 0x61 is "a", 0x7a is "z"; anything outside that ASCII range (including
  // NaN for the empty string) is not a lower-case tag-name start.
  return code >= 0x61 && code <= 0x7a;
}
+
/**
 * Turn the given jsxText string into a JS string literal. Leading and trailing
 * whitespace on lines is removed, except immediately after the open-tag and
 * before the close-tag. Empty lines are completely removed, and spaces are
 * added between lines after that.
 *
 * We use JSON.stringify to introduce escape characters as necessary, and trim
 * the start and end of each line and remove blank lines.
 */
function formatJSXTextLiteral(text) {
  let result = "";
  // Pending run of whitespace not yet committed to the result.
  let whitespace = "";

  // True while we are in the leading whitespace of a line after a newline.
  let isInInitialLineWhitespace = false;
  let seenNonWhitespace = false;
  for (let i = 0; i < text.length; i++) {
    const c = text[i];
    if (c === " " || c === "\t" || c === "\r") {
      if (!isInInitialLineWhitespace) {
        whitespace += c;
      }
    } else if (c === "\n") {
      // A newline discards any pending trailing whitespace on the line.
      whitespace = "";
      isInInitialLineWhitespace = true;
    } else {
      if (seenNonWhitespace && isInInitialLineWhitespace) {
        // Join this line to the previous non-empty line with a single space.
        result += " ";
      }
      result += whitespace;
      whitespace = "";
      if (c === "&") {
        // Decode an HTML entity (e.g. &amp;) if one starts here.
        const {entity, newI} = processEntity(text, i + 1);
        i = newI - 1;
        result += entity;
      } else {
        result += c;
      }
      seenNonWhitespace = true;
      isInInitialLineWhitespace = false;
    }
  }
  if (!isInInitialLineWhitespace) {
    // Preserve trailing whitespace immediately before the close-tag.
    result += whitespace;
  }
  return JSON.stringify(result);
}
+
+/**
+ * Produce the code that should be printed after the JSX text string literal,
+ * with most content removed, but all newlines preserved and all spacing at the
+ * end preserved.
+ */
+function formatJSXTextReplacement(text) {
+ let numNewlines = 0;
+ let numSpaces = 0;
+ for (const c of text) {
+ if (c === "\n") {
+ numNewlines++;
+ numSpaces = 0;
+ } else if (c === " ") {
+ numSpaces++;
+ }
+ }
+ return "\n".repeat(numNewlines) + " ".repeat(numSpaces);
+}
+
/**
 * Format a string in the value position of a JSX prop.
 *
 * Use the same implementation as convertAttribute from
 * babel-helper-builder-react-jsx.
 */
function formatJSXStringValueLiteral(text) {
  let result = "";
  for (let i = 0; i < text.length; i++) {
    const c = text[i];
    if (c === "\n") {
      if (/\s/.test(text[i + 1])) {
        // A newline followed by more whitespace collapses the whole run
        // into a single space.
        result += " ";
        while (i < text.length && /\s/.test(text[i + 1])) {
          i++;
        }
      } else {
        // A lone newline (not followed by whitespace) is kept as-is.
        result += "\n";
      }
    } else if (c === "&") {
      // Decode an HTML entity (e.g. &amp;) if one starts here.
      const {entity, newI} = processEntity(text, i + 1);
      result += entity;
      i = newI - 1;
    } else {
      result += c;
    }
  }
  return JSON.stringify(result);
}
+
/**
 * Starting at a &, see if there's an HTML entity (specified by name, decimal
 * char code, or hex char code) and return it if so.
 *
 * Modified from jsxReadString in babel-parser.
 *
 * Returns {entity, newI}: the decoded text and the index just past the
 * entity. If nothing decodable is found, returns a literal "&" and leaves
 * the index at indexAfterAmpersand so scanning resumes after the ampersand.
 */
function processEntity(text, indexAfterAmpersand) {
  let str = "";
  let count = 0;
  let entity;
  let i = indexAfterAmpersand;

  if (text[i] === "#") {
    // Numeric entity: &#123; (decimal) or &#x1F4A9; (hex).
    let radix = 10;
    i++;
    let numStart;
    if (text[i] === "x") {
      radix = 16;
      i++;
      numStart = i;
      while (i < text.length && isHexDigit(text.charCodeAt(i))) {
        i++;
      }
    } else {
      numStart = i;
      while (i < text.length && isDecimalDigit(text.charCodeAt(i))) {
        i++;
      }
    }
    // Only accept the entity if the digits are terminated by a semicolon
    // and at least one digit was consumed.
    if (text[i] === ";") {
      const numStr = text.slice(numStart, i);
      if (numStr) {
        i++;
        entity = String.fromCodePoint(parseInt(numStr, radix));
      }
    }
  } else {
    // Named entity like &amp;. Names are capped at 10 characters before
    // giving up and treating the ampersand as literal text.
    while (i < text.length && count++ < 10) {
      const ch = text[i];
      i++;
      if (ch === ";") {
        entity = XHTMLEntities.get(str);
        break;
      }
      str += ch;
    }
  }

  if (!entity) {
    return {entity: "&", newI: indexAfterAmpersand};
  }
  return {entity, newI: i};
}
+
/** True if the char code is an ASCII decimal digit, "0" (0x30) to "9" (0x39). */
function isDecimalDigit(code) {
  return code >= 0x30 && code <= 0x39;
}
+
/** True if the char code is an ASCII hex digit: 0-9, a-f, or A-F. */
function isHexDigit(code) {
  const isDigit = code >= 0x30 && code <= 0x39; // "0".."9"
  const isLowerHex = code >= 0x61 && code <= 0x66; // "a".."f"
  const isUpperHex = code >= 0x41 && code <= 0x46; // "A".."F"
  return isDigit || isLowerHex || isUpperHex;
}
diff --git a/node_modules/sucrase/dist/esm/transformers/JestHoistTransformer.js b/node_modules/sucrase/dist/esm/transformers/JestHoistTransformer.js
new file mode 100644
index 0000000..8f45d06
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/JestHoistTransformer.js
@@ -0,0 +1,111 @@
/**
 * Sucrase runtime helper for optional chaining.
 *
 * `ops` is a flat list: [startValue, op1, fn1, op2, fn2, ...], where each op
 * is one of 'access' | 'optionalAccess' | 'call' | 'optionalCall' and each fn
 * performs that step on the current value. The optional variants
 * short-circuit to undefined when the current value is null or undefined.
 */
function _optionalChain(ops) {
  let lastAccessLHS = undefined;
  let value = ops[0];
  let i = 1;
  while (i < ops.length) {
    const op = ops[i];
    const fn = ops[i + 1];
    i += 2;
    if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) {
      // a?.b / a?.() yields undefined when a is nullish.
      return undefined;
    }
    if (op === 'access' || op === 'optionalAccess') {
      // Remember the receiver so a following call keeps the right `this`.
      lastAccessLHS = value;
      value = fn(value);
    } else if (op === 'call' || op === 'optionalCall') {
      value = fn((...args) => value.call(lastAccessLHS, ...args));
      lastAccessLHS = undefined;
    }
  }
  return value;
}
+
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
+import Transformer from "./Transformer";
+
+const JEST_GLOBAL_NAME = "jest";
+const HOISTED_METHODS = ["mock", "unmock", "enableAutomock", "disableAutomock"];
+
/**
 * Implementation of babel-plugin-jest-hoist, which hoists up some jest method
 * calls above the imports to allow them to override other imports.
 *
 * To preserve line numbers, rather than directly moving the jest.mock code, we
 * wrap each invocation in a function statement and then call the function from
 * the top of the file.
 */
export default class JestHoistTransformer extends Transformer {
  // Names of the generated wrapper functions; getHoistedCode emits one call
  // per name at the top of the module.
  __init() {this.hoistedFunctionNames = []}

  constructor(
    rootTransformer,
    tokens,
    nameManager,
    importProcessor,
  ) {
    super();this.rootTransformer = rootTransformer;this.tokens = tokens;this.nameManager = nameManager;this.importProcessor = importProcessor;JestHoistTransformer.prototype.__init.call(this);;
  }

  /**
   * Detect a top-level `jest.method(...)` call and hand off to
   * extractHoistedCalls. Returns true when this transformer consumed tokens.
   */
  process() {
    if (
      this.tokens.currentToken().scopeDepth === 0 &&
      this.tokens.matches4(tt.name, tt.dot, tt.name, tt.parenL) &&
      this.tokens.identifierName() === JEST_GLOBAL_NAME
    ) {
      // TODO: This only works if imports transform is active, which it will be for jest.
      // But if jest adds module support and we no longer need the import transform, this needs fixing.
      if (_optionalChain([this, 'access', _ => _.importProcessor, 'optionalAccess', _2 => _2.getGlobalNames, 'call', _3 => _3(), 'optionalAccess', _4 => _4.has, 'call', _5 => _5(JEST_GLOBAL_NAME)])) {
        // `jest` is a binding imported by this module, so this is not the
        // global Jest object; leave the call alone.
        return false;
      }
      return this.extractHoistedCalls();
    }

    return false;
  }

  /**
   * Code emitted near the top of the module: one invocation per hoisted
   * wrapper function, in source order.
   */
  getHoistedCode() {
    if (this.hoistedFunctionNames.length > 0) {
      // This will be placed before module interop code, but that's fine since
      // imports aren't allowed in module mock factories.
      return this.hoistedFunctionNames.map((name) => `${name}();`).join("");
    }
    return "";
  }

  /**
   * Extracts any methods calls on the jest-object that should be hoisted.
   *
   * According to the jest docs, https://jestjs.io/docs/en/jest-object#jestmockmodulename-factory-options,
   * mock, unmock, enableAutomock, disableAutomock, are the methods that should be hoisted.
   *
   * We do not apply the same checks of the arguments as babel-plugin-jest-hoist does.
   */
  extractHoistedCalls() {
    // We're handling a chain of calls where `jest` may or may not need to be inserted for each call
    // in the chain, so remove the initial `jest` to make the loop implementation cleaner.
    this.tokens.removeToken();
    // Track some state so that multiple non-hoisted chained calls in a row keep their chaining
    // syntax.
    let followsNonHoistedJestCall = false;

    // Iterate through all chained calls on the jest object.
    while (this.tokens.matches3(tt.dot, tt.name, tt.parenL)) {
      const methodName = this.tokens.identifierNameAtIndex(this.tokens.currentIndex() + 1);
      const shouldHoist = HOISTED_METHODS.includes(methodName);
      if (shouldHoist) {
        // We've matched e.g. `.mock(...)` or similar call.
        // Replace the initial `.` with `function __jestHoist(){jest.`
        const hoistedFunctionName = this.nameManager.claimFreeName("__jestHoist");
        this.hoistedFunctionNames.push(hoistedFunctionName);
        this.tokens.replaceToken(`function ${hoistedFunctionName}(){${JEST_GLOBAL_NAME}.`);
        this.tokens.copyToken();
        this.tokens.copyToken();
        this.rootTransformer.processBalancedCode();
        this.tokens.copyExpectedToken(tt.parenR);
        // Close the wrapper function body right after the call.
        this.tokens.appendCode(";}");
        followsNonHoistedJestCall = false;
      } else {
        // This is a non-hoisted method, so just transform the code as usual.
        if (followsNonHoistedJestCall) {
          // If we didn't hoist the previous call, we can leave the code as-is to chain off of the
          // previous method call. It's important to preserve the code here because we don't know
          // for sure that the method actually returned the jest object for chaining.
          this.tokens.copyToken();
        } else {
          // If we hoisted the previous call, we know it returns the jest object back, so we insert
          // the identifier `jest` to continue the chain.
          this.tokens.replaceToken(`${JEST_GLOBAL_NAME}.`);
        }
        this.tokens.copyToken();
        this.tokens.copyToken();
        this.rootTransformer.processBalancedCode();
        this.tokens.copyExpectedToken(tt.parenR);
        followsNonHoistedJestCall = true;
      }
    }

    return true;
  }
}
diff --git a/node_modules/sucrase/dist/esm/transformers/NumericSeparatorTransformer.js b/node_modules/sucrase/dist/esm/transformers/NumericSeparatorTransformer.js
new file mode 100644
index 0000000..0cb01a1
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/NumericSeparatorTransformer.js
@@ -0,0 +1,20 @@
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+import Transformer from "./Transformer";
+
/**
 * Transformer that downlevels numeric-separator literals by stripping the
 * `_` characters, e.g. `1_000_000` becomes `1000000`.
 */
export default class NumericSeparatorTransformer extends Transformer {
  constructor( tokens) {
    super();
    this.tokens = tokens;
  }

  /** Returns true if the current token was rewritten. */
  process() {
    if (!this.tokens.matches1(tt.num)) {
      return false;
    }
    const code = this.tokens.currentTokenCode();
    if (!code.includes("_")) {
      return false;
    }
    this.tokens.replaceToken(code.replace(/_/g, ""));
    return true;
  }
}
diff --git a/node_modules/sucrase/dist/esm/transformers/OptionalCatchBindingTransformer.js b/node_modules/sucrase/dist/esm/transformers/OptionalCatchBindingTransformer.js
new file mode 100644
index 0000000..547273b
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/OptionalCatchBindingTransformer.js
@@ -0,0 +1,19 @@
+
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+import Transformer from "./Transformer";
+
/**
 * Transformer that downlevels optional catch bindings: `catch {` becomes
 * `catch (e) {` with a freshly claimed, unused binding name, since targets
 * before ES2019 require the parameter.
 */
export default class OptionalCatchBindingTransformer extends Transformer {
  constructor( tokens, nameManager) {
    super();
    this.tokens = tokens;
    this.nameManager = nameManager;
  }

  /** Returns true if a binding was inserted at the current position. */
  process() {
    if (!this.tokens.matches2(tt._catch, tt.braceL)) {
      return false;
    }
    this.tokens.copyToken();
    this.tokens.appendCode(` (${this.nameManager.claimFreeName("e")})`);
    return true;
  }
}
diff --git a/node_modules/sucrase/dist/esm/transformers/OptionalChainingNullishTransformer.js b/node_modules/sucrase/dist/esm/transformers/OptionalChainingNullishTransformer.js
new file mode 100644
index 0000000..571d97f
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/OptionalChainingNullishTransformer.js
@@ -0,0 +1,155 @@
+
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+import Transformer from "./Transformer";
+
/**
 * Transformer supporting the optional chaining and nullish coalescing operators.
 *
 * Tech plan here:
 * https://github.com/alangpierce/sucrase/wiki/Sucrase-Optional-Chaining-and-Nullish-Coalescing-Technical-Plan
 *
 * The prefix and suffix code snippets are handled by TokenProcessor, and this transformer handles
 * the operators themselves.
 */
export default class OptionalChainingNullishTransformer extends Transformer {
  constructor( tokens, nameManager) {
    super();this.tokens = tokens;this.nameManager = nameManager;;
  }

  /**
   * Rewrite the current token if it is part of a `??` expression or an
   * optional chain, emitting the op-name/arrow-function arguments consumed by
   * the `_nullishCoalesce`/`_optionalChain` runtime helpers. Returns true
   * when a rewrite happened.
   */
  process() {
    if (this.tokens.matches1(tt.nullishCoalescing)) {
      const token = this.tokens.currentToken();
      // The RHS becomes a lazily-evaluated arrow; async when the chain
      // contains an await.
      if (this.tokens.tokens[token.nullishStartIndex].isAsyncOperation) {
        this.tokens.replaceTokenTrimmingLeftWhitespace(", async () => (");
      } else {
        this.tokens.replaceTokenTrimmingLeftWhitespace(", () => (");
      }
      return true;
    }
    if (this.tokens.matches1(tt._delete)) {
      const nextToken = this.tokens.tokenAtRelativeIndex(1);
      if (nextToken.isOptionalChainStart) {
        // Drop the `delete` keyword; it is re-inserted at the last subscript
        // of the chain (see arrowStartSnippet below).
        this.tokens.removeInitialToken();
        return true;
      }
    }
    const token = this.tokens.currentToken();
    const chainStart = token.subscriptStartIndex;
    if (
      chainStart != null &&
      this.tokens.tokens[chainStart].isOptionalChainStart &&
      // Super subscripts can't be optional (since super is never null/undefined), and the syntax
      // relies on the subscript being intact, so leave this token alone.
      this.tokens.tokenAtRelativeIndex(-1).type !== tt._super
    ) {
      const param = this.nameManager.claimFreeName("_");
      let arrowStartSnippet;
      if (
        chainStart > 0 &&
        this.tokens.matches1AtIndex(chainStart - 1, tt._delete) &&
        this.isLastSubscriptInChain()
      ) {
        // Delete operations are special: we already removed the delete keyword, and to still
        // perform a delete, we need to insert a delete in the very last part of the chain, which
        // in correct code will always be a property access.
        arrowStartSnippet = `${param} => delete ${param}`;
      } else {
        arrowStartSnippet = `${param} => ${param}`;
      }
      if (this.tokens.tokens[chainStart].isAsyncOperation) {
        arrowStartSnippet = `async ${arrowStartSnippet}`;
      }
      // Dispatch on the subscript form to pick the helper op name and the
      // continuation of the arrow body.
      if (
        this.tokens.matches2(tt.questionDot, tt.parenL) ||
        this.tokens.matches2(tt.questionDot, tt.lessThan)
      ) {
        if (this.justSkippedSuper()) {
          this.tokens.appendCode(".bind(this)");
        }
        this.tokens.replaceTokenTrimmingLeftWhitespace(`, 'optionalCall', ${arrowStartSnippet}`);
      } else if (this.tokens.matches2(tt.questionDot, tt.bracketL)) {
        this.tokens.replaceTokenTrimmingLeftWhitespace(`, 'optionalAccess', ${arrowStartSnippet}`);
      } else if (this.tokens.matches1(tt.questionDot)) {
        this.tokens.replaceTokenTrimmingLeftWhitespace(`, 'optionalAccess', ${arrowStartSnippet}.`);
      } else if (this.tokens.matches1(tt.dot)) {
        this.tokens.replaceTokenTrimmingLeftWhitespace(`, 'access', ${arrowStartSnippet}.`);
      } else if (this.tokens.matches1(tt.bracketL)) {
        this.tokens.replaceTokenTrimmingLeftWhitespace(`, 'access', ${arrowStartSnippet}[`);
      } else if (this.tokens.matches1(tt.parenL)) {
        if (this.justSkippedSuper()) {
          this.tokens.appendCode(".bind(this)");
        }
        this.tokens.replaceTokenTrimmingLeftWhitespace(`, 'call', ${arrowStartSnippet}(`);
      } else {
        throw new Error("Unexpected subscript operator in optional chain.");
      }
      return true;
    }
    return false;
  }

  /**
   * Determine if the current token is the last of its chain, so that we know whether it's eligible
   * to have a delete op inserted.
   *
   * We can do this by walking forward until we determine one way or another. Each
   * isOptionalChainStart token must be paired with exactly one isOptionalChainEnd token after it in
   * a nesting way, so we can track depth and walk to the end of the chain (the point where the
   * depth goes negative) and see if any other subscript token is after us in the chain.
   */
  isLastSubscriptInChain() {
    let depth = 0;
    for (let i = this.tokens.currentIndex() + 1; ; i++) {
      if (i >= this.tokens.tokens.length) {
        throw new Error("Reached the end of the code while finding the end of the access chain.");
      }
      if (this.tokens.tokens[i].isOptionalChainStart) {
        depth++;
      } else if (this.tokens.tokens[i].isOptionalChainEnd) {
        depth--;
      }
      if (depth < 0) {
        return true;
      }

      // This subscript token is a later one in the same chain.
      if (depth === 0 && this.tokens.tokens[i].subscriptStartIndex != null) {
        return false;
      }
    }
  }

  /**
   * Determine if we are the open-paren in an expression like super.a()?.b.
   *
   * We can do this by walking backward to find the previous subscript. If that subscript was
   * preceded by a super, then we must be the subscript after it, so if this is a call expression,
   * we'll need to attach the right context.
   */
  justSkippedSuper() {
    let depth = 0;
    let index = this.tokens.currentIndex() - 1;
    while (true) {
      if (index < 0) {
        throw new Error(
          "Reached the start of the code while finding the start of the access chain.",
        );
      }
      if (this.tokens.tokens[index].isOptionalChainStart) {
        depth--;
      } else if (this.tokens.tokens[index].isOptionalChainEnd) {
        depth++;
      }
      if (depth < 0) {
        return false;
      }

      // This subscript token is a later one in the same chain.
      if (depth === 0 && this.tokens.tokens[index].subscriptStartIndex != null) {
        return this.tokens.tokens[index - 1].type === tt._super;
      }
      index--;
    }
  }
}
diff --git a/node_modules/sucrase/dist/esm/transformers/ReactDisplayNameTransformer.js b/node_modules/sucrase/dist/esm/transformers/ReactDisplayNameTransformer.js
new file mode 100644
index 0000000..0c44c81
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/ReactDisplayNameTransformer.js
@@ -0,0 +1,160 @@
+
+
+import {IdentifierRole} from "../parser/tokenizer";
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
+import Transformer from "./Transformer";
+
/**
 * Implementation of babel-plugin-transform-react-display-name, which adds a
 * display name to usages of React.createClass and createReactClass.
 */
export default class ReactDisplayNameTransformer extends Transformer {
  constructor(
    rootTransformer,
    tokens,
    importProcessor,
    options,
  ) {
    super();
    this.rootTransformer = rootTransformer;
    this.tokens = tokens;
    this.importProcessor = importProcessor;
    this.options = options;
  }

  /**
   * Detect `createReactClass(...)` or `React.createClass(...)` at the current
   * token and, if matched, try to inject a displayName into the class spec.
   * Returns true when tokens were consumed.
   */
  process() {
    const startIndex = this.tokens.currentIndex();
    if (this.tokens.identifierName() === "createReactClass") {
      const newName =
        this.importProcessor && this.importProcessor.getIdentifierReplacement("createReactClass");
      if (newName) {
        // Wrap in (0, ...) so the replacement is still called as a plain
        // function, not a method on the interop namespace.
        this.tokens.replaceToken(`(0, ${newName})`);
      } else {
        this.tokens.copyToken();
      }
      this.tryProcessCreateClassCall(startIndex);
      return true;
    }
    if (
      this.tokens.matches3(tt.name, tt.dot, tt.name) &&
      this.tokens.identifierName() === "React" &&
      this.tokens.identifierNameAtIndex(this.tokens.currentIndex() + 2) === "createClass"
    ) {
      // The `|| "React"` fallback makes newName always truthy, so the
      // original dead `if (newName)` else-branch has been removed.
      const newName = this.importProcessor
        ? this.importProcessor.getIdentifierReplacement("React") || "React"
        : "React";
      this.tokens.replaceToken(newName);
      this.tokens.copyToken();
      this.tokens.copyToken();
      this.tryProcessCreateClassCall(startIndex);
      return true;
    }
    return false;
  }

  /**
   * This is called with the token position at the open-paren.
   */
  tryProcessCreateClassCall(startIndex) {
    const displayName = this.findDisplayName(startIndex);
    if (!displayName) {
      return;
    }

    if (this.classNeedsDisplayName()) {
      this.tokens.copyExpectedToken(tt.parenL);
      this.tokens.copyExpectedToken(tt.braceL);
      this.tokens.appendCode(`displayName: '${displayName}',`);
      this.rootTransformer.processBalancedCode();
      this.tokens.copyExpectedToken(tt.braceR);
      this.tokens.copyExpectedToken(tt.parenR);
    }
  }

  /**
   * Infer a display name from the surrounding code (assignment target, object
   * key, or, for `export default`, the filename). Returns null when no name
   * can be determined.
   */
  findDisplayName(startIndex) {
    if (startIndex < 2) {
      return null;
    }
    if (this.tokens.matches2AtIndex(startIndex - 2, tt.name, tt.eq)) {
      // This is an assignment (or declaration) and the LHS is either an identifier or a member
      // expression ending in an identifier, so use that identifier name.
      return this.tokens.identifierNameAtIndex(startIndex - 2);
    }
    // Note: startIndex >= 2 is already guaranteed by the early return above.
    if (this.tokens.tokens[startIndex - 2].identifierRole === IdentifierRole.ObjectKey) {
      // This is an object literal value.
      return this.tokens.identifierNameAtIndex(startIndex - 2);
    }
    if (this.tokens.matches2AtIndex(startIndex - 2, tt._export, tt._default)) {
      return this.getDisplayNameFromFilename();
    }
    return null;
  }

  /**
   * Derive a display name from the file path: the basename without its
   * extension, or the parent directory name when the basename is "index".
   */
  getDisplayNameFromFilename() {
    const filePath = this.options.filePath || "unknown";
    const pathSegments = filePath.split("/");
    const filename = pathSegments[pathSegments.length - 1];
    const dotIndex = filename.lastIndexOf(".");
    const baseFilename = dotIndex === -1 ? filename : filename.slice(0, dotIndex);
    if (baseFilename === "index" && pathSegments[pathSegments.length - 2]) {
      return pathSegments[pathSegments.length - 2];
    } else {
      return baseFilename;
    }
  }

  /**
   * We only want to add a display name when this is a function call containing
   * one argument, which is an object literal without `displayName` as an
   * existing key.
   */
  classNeedsDisplayName() {
    let index = this.tokens.currentIndex();
    if (!this.tokens.matches2(tt.parenL, tt.braceL)) {
      return false;
    }
    // The block starts on the {, and we expect any displayName key to be in
    // that context. We need to ignore other contexts to avoid matching
    // nested displayName keys.
    const objectStartIndex = index + 1;
    const objectContextId = this.tokens.tokens[objectStartIndex].contextId;
    if (objectContextId == null) {
      throw new Error("Expected non-null context ID on object open-brace.");
    }

    for (; index < this.tokens.tokens.length; index++) {
      const token = this.tokens.tokens[index];
      if (token.type === tt.braceR && token.contextId === objectContextId) {
        index++;
        break;
      }

      if (
        this.tokens.identifierNameAtIndex(index) === "displayName" &&
        this.tokens.tokens[index].identifierRole === IdentifierRole.ObjectKey &&
        token.contextId === objectContextId
      ) {
        // We found a displayName key, so bail out.
        return false;
      }
    }

    if (index === this.tokens.tokens.length) {
      throw new Error("Unexpected end of input when processing React class.");
    }

    // If we got this far, we know we have createClass with an object with no
    // display name, so we want to proceed as long as that was the only argument.
    return (
      this.tokens.matches1AtIndex(index, tt.parenR) ||
      this.tokens.matches2AtIndex(index, tt.comma, tt.parenR)
    );
  }
}
diff --git a/node_modules/sucrase/dist/esm/transformers/ReactHotLoaderTransformer.js b/node_modules/sucrase/dist/esm/transformers/ReactHotLoaderTransformer.js
new file mode 100644
index 0000000..873902e
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/ReactHotLoaderTransformer.js
@@ -0,0 +1,69 @@
+import {IdentifierRole, isTopLevelDeclaration} from "../parser/tokenizer";
+
+import Transformer from "./Transformer";
+
export default class ReactHotLoaderTransformer extends Transformer {
  // Name of the variable holding an extracted `export default` expression,
  // if the import transform extracted one; it is registered as "default".
  __init() {this.extractedDefaultExportName = null}

  constructor( tokens, filePath) {
    super();this.tokens = tokens;this.filePath = filePath;ReactHotLoaderTransformer.prototype.__init.call(this);;
  }

  setExtractedDefaultExportName(extractedDefaultExportName) {
    this.extractedDefaultExportName = extractedDefaultExportName;
  }

  /**
   * Code injected at the top of the module: register the module with
   * react-hot-loader. Collapsed to a single line to preserve line numbers.
   */
  getPrefixCode() {
    return `
      (function () {
        var enterModule = require('react-hot-loader').enterModule;
        enterModule && enterModule(module);
      })();`
      .replace(/\s+/g, " ")
      .trim();
  }

  /**
   * Code injected at the bottom of the module: register every non-type
   * top-level declared name (excluding import bindings) with
   * react-hot-loader, plus the extracted default export if present, then
   * leave the module.
   */
  getSuffixCode() {
    const topLevelNames = new Set();
    for (const token of this.tokens.tokens) {
      if (
        !token.isType &&
        isTopLevelDeclaration(token) &&
        token.identifierRole !== IdentifierRole.ImportDeclaration
      ) {
        topLevelNames.add(this.tokens.identifierNameForToken(token));
      }
    }
    const namesToRegister = Array.from(topLevelNames).map((name) => ({
      variableName: name,
      uniqueLocalName: name,
    }));
    if (this.extractedDefaultExportName) {
      namesToRegister.push({
        variableName: this.extractedDefaultExportName,
        uniqueLocalName: "default",
      });
    }
    return `
;(function () {
  var reactHotLoader = require('react-hot-loader').default;
  var leaveModule = require('react-hot-loader').leaveModule;
  if (!reactHotLoader) {
    return;
  }
${namesToRegister
  .map(
    ({variableName, uniqueLocalName}) =>
      `  reactHotLoader.register(${variableName}, "${uniqueLocalName}", ${JSON.stringify(
        this.filePath || "",
      )});`,
  )
  .join("\n")}
  leaveModule(module);
})();`;
  }

  // This transformer only contributes prefix/suffix code; it never rewrites
  // tokens in place.
  process() {
    return false;
  }
}
diff --git a/node_modules/sucrase/dist/esm/transformers/RootTransformer.js b/node_modules/sucrase/dist/esm/transformers/RootTransformer.js
new file mode 100644
index 0000000..c7a83fe
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/RootTransformer.js
@@ -0,0 +1,462 @@
+
+
+
+import {ContextualKeyword} from "../parser/tokenizer/keywords";
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+import getClassInfo, {} from "../util/getClassInfo";
+import CJSImportTransformer from "./CJSImportTransformer";
+import ESMImportTransformer from "./ESMImportTransformer";
+import FlowTransformer from "./FlowTransformer";
+import JestHoistTransformer from "./JestHoistTransformer";
+import JSXTransformer from "./JSXTransformer";
+import NumericSeparatorTransformer from "./NumericSeparatorTransformer";
+import OptionalCatchBindingTransformer from "./OptionalCatchBindingTransformer";
+import OptionalChainingNullishTransformer from "./OptionalChainingNullishTransformer";
+import ReactDisplayNameTransformer from "./ReactDisplayNameTransformer";
+import ReactHotLoaderTransformer from "./ReactHotLoaderTransformer";
+
+import TypeScriptTransformer from "./TypeScriptTransformer";
+
+
+
+
+
+
+
+
+export default class RootTransformer {
+ __init() {this.transformers = []}
+
+
+ __init2() {this.generatedVariables = []}
+
+
+
+
+
+ constructor(
+ sucraseContext,
+ transforms,
+ enableLegacyBabel5ModuleInterop,
+ options,
+ ) {;RootTransformer.prototype.__init.call(this);RootTransformer.prototype.__init2.call(this);
+ this.nameManager = sucraseContext.nameManager;
+ this.helperManager = sucraseContext.helperManager;
+ const {tokenProcessor, importProcessor} = sucraseContext;
+ this.tokens = tokenProcessor;
+ this.isImportsTransformEnabled = transforms.includes("imports");
+ this.isReactHotLoaderTransformEnabled = transforms.includes("react-hot-loader");
+ this.disableESTransforms = Boolean(options.disableESTransforms);
+
+ if (!options.disableESTransforms) {
+ this.transformers.push(
+ new OptionalChainingNullishTransformer(tokenProcessor, this.nameManager),
+ );
+ this.transformers.push(new NumericSeparatorTransformer(tokenProcessor));
+ this.transformers.push(new OptionalCatchBindingTransformer(tokenProcessor, this.nameManager));
+ }
+
+ if (transforms.includes("jsx")) {
+ if (options.jsxRuntime !== "preserve") {
+ this.transformers.push(
+ new JSXTransformer(this, tokenProcessor, importProcessor, this.nameManager, options),
+ );
+ }
+ this.transformers.push(
+ new ReactDisplayNameTransformer(this, tokenProcessor, importProcessor, options),
+ );
+ }
+
+ let reactHotLoaderTransformer = null;
+ if (transforms.includes("react-hot-loader")) {
+ if (!options.filePath) {
+ throw new Error("filePath is required when using the react-hot-loader transform.");
+ }
+ reactHotLoaderTransformer = new ReactHotLoaderTransformer(tokenProcessor, options.filePath);
+ this.transformers.push(reactHotLoaderTransformer);
+ }
+
+ // Note that we always want to enable the imports transformer, even when the import transform
+ // itself isn't enabled, since we need to do type-only import pruning for both Flow and
+ // TypeScript.
+ if (transforms.includes("imports")) {
+ if (importProcessor === null) {
+ throw new Error("Expected non-null importProcessor with imports transform enabled.");
+ }
+ this.transformers.push(
+ new CJSImportTransformer(
+ this,
+ tokenProcessor,
+ importProcessor,
+ this.nameManager,
+ this.helperManager,
+ reactHotLoaderTransformer,
+ enableLegacyBabel5ModuleInterop,
+ Boolean(options.enableLegacyTypeScriptModuleInterop),
+ transforms.includes("typescript"),
+ transforms.includes("flow"),
+ Boolean(options.preserveDynamicImport),
+ Boolean(options.keepUnusedImports),
+ ),
+ );
+ } else {
+ this.transformers.push(
+ new ESMImportTransformer(
+ tokenProcessor,
+ this.nameManager,
+ this.helperManager,
+ reactHotLoaderTransformer,
+ transforms.includes("typescript"),
+ transforms.includes("flow"),
+ Boolean(options.keepUnusedImports),
+ options,
+ ),
+ );
+ }
+
+ if (transforms.includes("flow")) {
+ this.transformers.push(
+ new FlowTransformer(this, tokenProcessor, transforms.includes("imports")),
+ );
+ }
+ if (transforms.includes("typescript")) {
+ this.transformers.push(
+ new TypeScriptTransformer(this, tokenProcessor, transforms.includes("imports")),
+ );
+ }
+ if (transforms.includes("jest")) {
+ this.transformers.push(
+ new JestHoistTransformer(this, tokenProcessor, this.nameManager, importProcessor),
+ );
+ }
+ }
+
+ transform() {
+ this.tokens.reset();
+ this.processBalancedCode();
+ const shouldAddUseStrict = this.isImportsTransformEnabled;
+ // "use strict" always needs to be first, so override the normal transformer order.
+ let prefix = shouldAddUseStrict ? '"use strict";' : "";
+ for (const transformer of this.transformers) {
+ prefix += transformer.getPrefixCode();
+ }
+ prefix += this.helperManager.emitHelpers();
+ prefix += this.generatedVariables.map((v) => ` var ${v};`).join("");
+ for (const transformer of this.transformers) {
+ prefix += transformer.getHoistedCode();
+ }
+ let suffix = "";
+ for (const transformer of this.transformers) {
+ suffix += transformer.getSuffixCode();
+ }
+ const result = this.tokens.finish();
+ let {code} = result;
+ if (code.startsWith("#!")) {
+ let newlineIndex = code.indexOf("\n");
+ if (newlineIndex === -1) {
+ newlineIndex = code.length;
+ code += "\n";
+ }
+ return {
+ code: code.slice(0, newlineIndex + 1) + prefix + code.slice(newlineIndex + 1) + suffix,
+ // The hashbang line has no tokens, so shifting the tokens to account
+ // for prefix can happen normally.
+ mappings: this.shiftMappings(result.mappings, prefix.length),
+ };
+ } else {
+ return {
+ code: prefix + code + suffix,
+ mappings: this.shiftMappings(result.mappings, prefix.length),
+ };
+ }
+ }
+
+ processBalancedCode() {
+ let braceDepth = 0;
+ let parenDepth = 0;
+ while (!this.tokens.isAtEnd()) {
+ if (this.tokens.matches1(tt.braceL) || this.tokens.matches1(tt.dollarBraceL)) {
+ braceDepth++;
+ } else if (this.tokens.matches1(tt.braceR)) {
+ if (braceDepth === 0) {
+ return;
+ }
+ braceDepth--;
+ }
+ if (this.tokens.matches1(tt.parenL)) {
+ parenDepth++;
+ } else if (this.tokens.matches1(tt.parenR)) {
+ if (parenDepth === 0) {
+ return;
+ }
+ parenDepth--;
+ }
+ this.processToken();
+ }
+ }
+
+ processToken() {
+ if (this.tokens.matches1(tt._class)) {
+ this.processClass();
+ return;
+ }
+ for (const transformer of this.transformers) {
+ const wasProcessed = transformer.process();
+ if (wasProcessed) {
+ return;
+ }
+ }
+ this.tokens.copyToken();
+ }
+
+ /**
+ * Skip past a class with a name and return that name.
+ */
+ processNamedClass() {
+ if (!this.tokens.matches2(tt._class, tt.name)) {
+ throw new Error("Expected identifier for exported class name.");
+ }
+ const name = this.tokens.identifierNameAtIndex(this.tokens.currentIndex() + 1);
+ this.processClass();
+ return name;
+ }
+
+ processClass() {
+ const classInfo = getClassInfo(this, this.tokens, this.nameManager, this.disableESTransforms);
+
+ // Both static and instance initializers need a class name to use to invoke the initializer, so
+ // assign to one if necessary.
+ const needsCommaExpression =
+ (classInfo.headerInfo.isExpression || !classInfo.headerInfo.className) &&
+ classInfo.staticInitializerNames.length + classInfo.instanceInitializerNames.length > 0;
+
+ let className = classInfo.headerInfo.className;
+ if (needsCommaExpression) {
+ className = this.nameManager.claimFreeName("_class");
+ this.generatedVariables.push(className);
+ this.tokens.appendCode(` (${className} =`);
+ }
+
+ const classToken = this.tokens.currentToken();
+ const contextId = classToken.contextId;
+ if (contextId == null) {
+ throw new Error("Expected class to have a context ID.");
+ }
+ this.tokens.copyExpectedToken(tt._class);
+ while (!this.tokens.matchesContextIdAndLabel(tt.braceL, contextId)) {
+ this.processToken();
+ }
+
+ this.processClassBody(classInfo, className);
+
+ const staticInitializerStatements = classInfo.staticInitializerNames.map(
+ (name) => `${className}.${name}()`,
+ );
+ if (needsCommaExpression) {
+ this.tokens.appendCode(
+ `, ${staticInitializerStatements.map((s) => `${s}, `).join("")}${className})`,
+ );
+ } else if (classInfo.staticInitializerNames.length > 0) {
+ this.tokens.appendCode(` ${staticInitializerStatements.map((s) => `${s};`).join(" ")}`);
+ }
+ }
+
+ /**
+ * We want to just handle class fields in all contexts, since TypeScript supports them. Later,
+ * when some JS implementations support class fields, this should be made optional.
+ */
+ processClassBody(classInfo, className) {
+ const {
+ headerInfo,
+ constructorInsertPos,
+ constructorInitializerStatements,
+ fields,
+ instanceInitializerNames,
+ rangesToRemove,
+ } = classInfo;
+ let fieldIndex = 0;
+ let rangeToRemoveIndex = 0;
+ const classContextId = this.tokens.currentToken().contextId;
+ if (classContextId == null) {
+ throw new Error("Expected non-null context ID on class.");
+ }
+ this.tokens.copyExpectedToken(tt.braceL);
+ if (this.isReactHotLoaderTransformEnabled) {
+ this.tokens.appendCode(
+ "__reactstandin__regenerateByEval(key, code) {this[key] = eval(code);}",
+ );
+ }
+
+ const needsConstructorInit =
+ constructorInitializerStatements.length + instanceInitializerNames.length > 0;
+
+ if (constructorInsertPos === null && needsConstructorInit) {
+ const constructorInitializersCode = this.makeConstructorInitCode(
+ constructorInitializerStatements,
+ instanceInitializerNames,
+ className,
+ );
+ if (headerInfo.hasSuperclass) {
+ const argsName = this.nameManager.claimFreeName("args");
+ this.tokens.appendCode(
+ `constructor(...${argsName}) { super(...${argsName}); ${constructorInitializersCode}; }`,
+ );
+ } else {
+ this.tokens.appendCode(`constructor() { ${constructorInitializersCode}; }`);
+ }
+ }
+
+ while (!this.tokens.matchesContextIdAndLabel(tt.braceR, classContextId)) {
+ if (fieldIndex < fields.length && this.tokens.currentIndex() === fields[fieldIndex].start) {
+ let needsCloseBrace = false;
+ if (this.tokens.matches1(tt.bracketL)) {
+ this.tokens.copyTokenWithPrefix(`${fields[fieldIndex].initializerName}() {this`);
+ } else if (this.tokens.matches1(tt.string) || this.tokens.matches1(tt.num)) {
+ this.tokens.copyTokenWithPrefix(`${fields[fieldIndex].initializerName}() {this[`);
+ needsCloseBrace = true;
+ } else {
+ this.tokens.copyTokenWithPrefix(`${fields[fieldIndex].initializerName}() {this.`);
+ }
+ while (this.tokens.currentIndex() < fields[fieldIndex].end) {
+ if (needsCloseBrace && this.tokens.currentIndex() === fields[fieldIndex].equalsIndex) {
+ this.tokens.appendCode("]");
+ }
+ this.processToken();
+ }
+ this.tokens.appendCode("}");
+ fieldIndex++;
+ } else if (
+ rangeToRemoveIndex < rangesToRemove.length &&
+ this.tokens.currentIndex() >= rangesToRemove[rangeToRemoveIndex].start
+ ) {
+ if (this.tokens.currentIndex() < rangesToRemove[rangeToRemoveIndex].end) {
+ this.tokens.removeInitialToken();
+ }
+ while (this.tokens.currentIndex() < rangesToRemove[rangeToRemoveIndex].end) {
+ this.tokens.removeToken();
+ }
+ rangeToRemoveIndex++;
+ } else if (this.tokens.currentIndex() === constructorInsertPos) {
+ this.tokens.copyToken();
+ if (needsConstructorInit) {
+ this.tokens.appendCode(
+ `;${this.makeConstructorInitCode(
+ constructorInitializerStatements,
+ instanceInitializerNames,
+ className,
+ )};`,
+ );
+ }
+ this.processToken();
+ } else {
+ this.processToken();
+ }
+ }
+ this.tokens.copyExpectedToken(tt.braceR);
+ }
+
+ makeConstructorInitCode(
+ constructorInitializerStatements,
+ instanceInitializerNames,
+ className,
+ ) {
+ return [
+ ...constructorInitializerStatements,
+ ...instanceInitializerNames.map((name) => `${className}.prototype.${name}.call(this)`),
+ ].join(";");
+ }
+
+ /**
+ * Normally it's ok to simply remove type tokens, but we need to be more careful when dealing with
+ * arrow function return types since they can confuse the parser. In that case, we want to move
+ * the close-paren to the same line as the arrow.
+ *
+ * See https://github.com/alangpierce/sucrase/issues/391 for more details.
+ */
+ processPossibleArrowParamEnd() {
+ if (this.tokens.matches2(tt.parenR, tt.colon) && this.tokens.tokenAtRelativeIndex(1).isType) {
+ let nextNonTypeIndex = this.tokens.currentIndex() + 1;
+ // Look ahead to see if this is an arrow function or something else.
+ while (this.tokens.tokens[nextNonTypeIndex].isType) {
+ nextNonTypeIndex++;
+ }
+ if (this.tokens.matches1AtIndex(nextNonTypeIndex, tt.arrow)) {
+ this.tokens.removeInitialToken();
+ while (this.tokens.currentIndex() < nextNonTypeIndex) {
+ this.tokens.removeToken();
+ }
+ this.tokens.replaceTokenTrimmingLeftWhitespace(") =>");
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * An async arrow function might be of the form:
+ *
+ * async <
+ * T
+ * >() => {}
+ *
+ * in which case, removing the type parameters will cause a syntax error. Detect this case and
+ * move the open-paren earlier.
+ */
+ processPossibleAsyncArrowWithTypeParams() {
+ if (
+ !this.tokens.matchesContextual(ContextualKeyword._async) &&
+ !this.tokens.matches1(tt._async)
+ ) {
+ return false;
+ }
+ const nextToken = this.tokens.tokenAtRelativeIndex(1);
+ if (nextToken.type !== tt.lessThan || !nextToken.isType) {
+ return false;
+ }
+
+ let nextNonTypeIndex = this.tokens.currentIndex() + 1;
+ // Look ahead to see if this is an arrow function or something else.
+ while (this.tokens.tokens[nextNonTypeIndex].isType) {
+ nextNonTypeIndex++;
+ }
+ if (this.tokens.matches1AtIndex(nextNonTypeIndex, tt.parenL)) {
+ this.tokens.replaceToken("async (");
+ this.tokens.removeInitialToken();
+ while (this.tokens.currentIndex() < nextNonTypeIndex) {
+ this.tokens.removeToken();
+ }
+ this.tokens.removeToken();
+ // We ate a ( token, so we need to process the tokens in between and then the ) token so that
+ // we remain balanced.
+ this.processBalancedCode();
+ this.processToken();
+ return true;
+ }
+ return false;
+ }
+
+ processPossibleTypeRange() {
+ if (this.tokens.currentToken().isType) {
+ this.tokens.removeInitialToken();
+ while (this.tokens.currentToken().isType) {
+ this.tokens.removeToken();
+ }
+ return true;
+ }
+ return false;
+ }
+
+ shiftMappings(
+ mappings,
+ prefixLength,
+ ) {
+ for (let i = 0; i < mappings.length; i++) {
+ const mapping = mappings[i];
+ if (mapping !== undefined) {
+ mappings[i] = mapping + prefixLength;
+ }
+ }
+ return mappings;
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/transformers/Transformer.js b/node_modules/sucrase/dist/esm/transformers/Transformer.js
new file mode 100644
index 0000000..5e8e9e7
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/Transformer.js
@@ -0,0 +1,16 @@
+export default class Transformer {
+ // Return true if anything was processed, false otherwise.
+
+
+ getPrefixCode() {
+ return "";
+ }
+
+ getHoistedCode() {
+ return "";
+ }
+
+ getSuffixCode() {
+ return "";
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/transformers/TypeScriptTransformer.js b/node_modules/sucrase/dist/esm/transformers/TypeScriptTransformer.js
new file mode 100644
index 0000000..67e1274
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/transformers/TypeScriptTransformer.js
@@ -0,0 +1,279 @@
+
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+import isIdentifier from "../util/isIdentifier";
+
+import Transformer from "./Transformer";
+
+export default class TypeScriptTransformer extends Transformer {
+ constructor(
+ rootTransformer,
+ tokens,
+ isImportsTransformEnabled,
+ ) {
+ super();this.rootTransformer = rootTransformer;this.tokens = tokens;this.isImportsTransformEnabled = isImportsTransformEnabled;;
+ }
+
+ process() {
+ if (
+ this.rootTransformer.processPossibleArrowParamEnd() ||
+ this.rootTransformer.processPossibleAsyncArrowWithTypeParams() ||
+ this.rootTransformer.processPossibleTypeRange()
+ ) {
+ return true;
+ }
+ if (
+ this.tokens.matches1(tt._public) ||
+ this.tokens.matches1(tt._protected) ||
+ this.tokens.matches1(tt._private) ||
+ this.tokens.matches1(tt._abstract) ||
+ this.tokens.matches1(tt._readonly) ||
+ this.tokens.matches1(tt._override) ||
+ this.tokens.matches1(tt.nonNullAssertion)
+ ) {
+ this.tokens.removeInitialToken();
+ return true;
+ }
+ if (this.tokens.matches1(tt._enum) || this.tokens.matches2(tt._const, tt._enum)) {
+ this.processEnum();
+ return true;
+ }
+ if (
+ this.tokens.matches2(tt._export, tt._enum) ||
+ this.tokens.matches3(tt._export, tt._const, tt._enum)
+ ) {
+ this.processEnum(true);
+ return true;
+ }
+ return false;
+ }
+
+ processEnum(isExport = false) {
+ // We might have "export const enum", so just remove all relevant tokens.
+ this.tokens.removeInitialToken();
+ while (this.tokens.matches1(tt._const) || this.tokens.matches1(tt._enum)) {
+ this.tokens.removeToken();
+ }
+ const enumName = this.tokens.identifierName();
+ this.tokens.removeToken();
+ if (isExport && !this.isImportsTransformEnabled) {
+ this.tokens.appendCode("export ");
+ }
+ this.tokens.appendCode(`var ${enumName}; (function (${enumName})`);
+ this.tokens.copyExpectedToken(tt.braceL);
+ this.processEnumBody(enumName);
+ this.tokens.copyExpectedToken(tt.braceR);
+ if (isExport && this.isImportsTransformEnabled) {
+ this.tokens.appendCode(`)(${enumName} || (exports.${enumName} = ${enumName} = {}));`);
+ } else {
+ this.tokens.appendCode(`)(${enumName} || (${enumName} = {}));`);
+ }
+ }
+
+ /**
+ * Transform an enum into equivalent JS. This has complexity in a few places:
+ * - TS allows string enums, numeric enums, and a mix of the two styles within an enum.
+ * - Enum keys are allowed to be referenced in later enum values.
+ * - Enum keys are allowed to be strings.
+ * - When enum values are omitted, they should follow an auto-increment behavior.
+ */
+ processEnumBody(enumName) {
+ // Code that can be used to reference the previous enum member, or null if this is the first
+ // enum member.
+ let previousValueCode = null;
+ while (true) {
+ if (this.tokens.matches1(tt.braceR)) {
+ break;
+ }
+ const {nameStringCode, variableName} = this.extractEnumKeyInfo(this.tokens.currentToken());
+ this.tokens.removeInitialToken();
+
+ if (
+ this.tokens.matches3(tt.eq, tt.string, tt.comma) ||
+ this.tokens.matches3(tt.eq, tt.string, tt.braceR)
+ ) {
+ this.processStringLiteralEnumMember(enumName, nameStringCode, variableName);
+ } else if (this.tokens.matches1(tt.eq)) {
+ this.processExplicitValueEnumMember(enumName, nameStringCode, variableName);
+ } else {
+ this.processImplicitValueEnumMember(
+ enumName,
+ nameStringCode,
+ variableName,
+ previousValueCode,
+ );
+ }
+ if (this.tokens.matches1(tt.comma)) {
+ this.tokens.removeToken();
+ }
+
+ if (variableName != null) {
+ previousValueCode = variableName;
+ } else {
+ previousValueCode = `${enumName}[${nameStringCode}]`;
+ }
+ }
+ }
+
+ /**
+ * Detect name information about this enum key, which will be used to determine which code to emit
+ * and whether we should declare a variable as part of this declaration.
+ *
+ * Some cases to keep in mind:
+ * - Enum keys can be implicitly referenced later, e.g. `X = 1, Y = X`. In Sucrase, we implement
+ * this by declaring a variable `X` so that later expressions can use it.
+ * - In addition to the usual identifier key syntax, enum keys are allowed to be string literals,
+ * e.g. `"hello world" = 3,`. Template literal syntax is NOT allowed.
+ * - Even if the enum key is defined as a string literal, it may still be referenced by identifier
+ * later, e.g. `"X" = 1, Y = X`. That means that we need to detect whether or not a string
+ * literal is identifier-like and emit a variable if so, even if the declaration did not use an
+ * identifier.
+ * - Reserved keywords like `break` are valid enum keys, but are not valid to be referenced later
+ * and would be a syntax error if we emitted a variable, so we need to skip the variable
+ * declaration in those cases.
+ *
+ * The variableName return value captures these nuances: if non-null, we can and must emit a
+ * variable declaration, and if null, we can't and shouldn't.
+ */
+ extractEnumKeyInfo(nameToken) {
+ if (nameToken.type === tt.name) {
+ const name = this.tokens.identifierNameForToken(nameToken);
+ return {
+ nameStringCode: `"${name}"`,
+ variableName: isIdentifier(name) ? name : null,
+ };
+ } else if (nameToken.type === tt.string) {
+ const name = this.tokens.stringValueForToken(nameToken);
+ return {
+ nameStringCode: this.tokens.code.slice(nameToken.start, nameToken.end),
+ variableName: isIdentifier(name) ? name : null,
+ };
+ } else {
+ throw new Error("Expected name or string at beginning of enum element.");
+ }
+ }
+
+ /**
+ * Handle an enum member where the RHS is just a string literal (not omitted, not a number, and
+ * not a complex expression). This is the typical form for TS string enums, and in this case, we
+ * do *not* create a reverse mapping.
+ *
+ * This is called after deleting the key token, when the token processor is at the equals sign.
+ *
+ * Example 1:
+ * someKey = "some value"
+ * ->
+ * const someKey = "some value"; MyEnum["someKey"] = someKey;
+ *
+ * Example 2:
+ * "some key" = "some value"
+ * ->
+ * MyEnum["some key"] = "some value";
+ */
+ processStringLiteralEnumMember(
+ enumName,
+ nameStringCode,
+ variableName,
+ ) {
+ if (variableName != null) {
+ this.tokens.appendCode(`const ${variableName}`);
+ // =
+ this.tokens.copyToken();
+ // value string
+ this.tokens.copyToken();
+ this.tokens.appendCode(`; ${enumName}[${nameStringCode}] = ${variableName};`);
+ } else {
+ this.tokens.appendCode(`${enumName}[${nameStringCode}]`);
+ // =
+ this.tokens.copyToken();
+ // value string
+ this.tokens.copyToken();
+ this.tokens.appendCode(";");
+ }
+ }
+
+ /**
+ * Handle an enum member initialized with an expression on the right-hand side (other than a
+ * string literal). In these cases, we should transform the expression and emit code that sets up
+ * a reverse mapping.
+ *
+ * The TypeScript implementation of this operation distinguishes between expressions that can be
+ * "constant folded" at compile time (i.e. consist of number literals and simple math operations
+ * on those numbers) and ones that are dynamic. For constant expressions, it emits the resolved
+ * numeric value, and auto-incrementing is only allowed in that case. Evaluating expressions at
+ * compile time would add significant complexity to Sucrase, so Sucrase instead leaves the
+ * expression as-is, and will later emit something like `MyEnum["previousKey"] + 1` to implement
+ * auto-incrementing.
+ *
+ * This is called after deleting the key token, when the token processor is at the equals sign.
+ *
+ * Example 1:
+ * someKey = 1 + 1
+ * ->
+ * const someKey = 1 + 1; MyEnum[MyEnum["someKey"] = someKey] = "someKey";
+ *
+ * Example 2:
+ * "some key" = 1 + 1
+ * ->
+ * MyEnum[MyEnum["some key"] = 1 + 1] = "some key";
+ */
+ processExplicitValueEnumMember(
+ enumName,
+ nameStringCode,
+ variableName,
+ ) {
+ const rhsEndIndex = this.tokens.currentToken().rhsEndIndex;
+ if (rhsEndIndex == null) {
+ throw new Error("Expected rhsEndIndex on enum assign.");
+ }
+
+ if (variableName != null) {
+ this.tokens.appendCode(`const ${variableName}`);
+ this.tokens.copyToken();
+ while (this.tokens.currentIndex() < rhsEndIndex) {
+ this.rootTransformer.processToken();
+ }
+ this.tokens.appendCode(
+ `; ${enumName}[${enumName}[${nameStringCode}] = ${variableName}] = ${nameStringCode};`,
+ );
+ } else {
+ this.tokens.appendCode(`${enumName}[${enumName}[${nameStringCode}]`);
+ this.tokens.copyToken();
+ while (this.tokens.currentIndex() < rhsEndIndex) {
+ this.rootTransformer.processToken();
+ }
+ this.tokens.appendCode(`] = ${nameStringCode};`);
+ }
+ }
+
+ /**
+ * Handle an enum member with no right-hand side expression. In this case, the value is the
+ * previous value plus 1, or 0 if there was no previous value. We should also always emit a
+ * reverse mapping.
+ *
+ * Example 1:
+ * someKey2
+ * ->
+ * const someKey2 = someKey1 + 1; MyEnum[MyEnum["someKey2"] = someKey2] = "someKey2";
+ *
+ * Example 2:
+ * "some key 2"
+ * ->
+ * MyEnum[MyEnum["some key 2"] = someKey1 + 1] = "some key 2";
+ */
+ processImplicitValueEnumMember(
+ enumName,
+ nameStringCode,
+ variableName,
+ previousValueCode,
+ ) {
+ let valueCode = previousValueCode != null ? `${previousValueCode} + 1` : "0";
+ if (variableName != null) {
+ this.tokens.appendCode(`const ${variableName} = ${valueCode}; `);
+ valueCode = variableName;
+ }
+ this.tokens.appendCode(
+ `${enumName}[${enumName}[${nameStringCode}] = ${valueCode}] = ${nameStringCode};`,
+ );
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/util/elideImportEquals.js b/node_modules/sucrase/dist/esm/util/elideImportEquals.js
new file mode 100644
index 0000000..6b18a7a
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/elideImportEquals.js
@@ -0,0 +1,29 @@
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
+export default function elideImportEquals(tokens) {
+ // import
+ tokens.removeInitialToken();
+ // name
+ tokens.removeToken();
+ // =
+ tokens.removeToken();
+ // name or require
+ tokens.removeToken();
+ // Handle either `import A = require('A')` or `import A = B.C.D`.
+ if (tokens.matches1(tt.parenL)) {
+ // (
+ tokens.removeToken();
+ // path string
+ tokens.removeToken();
+ // )
+ tokens.removeToken();
+ } else {
+ while (tokens.matches1(tt.dot)) {
+ // .
+ tokens.removeToken();
+ // name
+ tokens.removeToken();
+ }
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/util/formatTokens.js b/node_modules/sucrase/dist/esm/util/formatTokens.js
new file mode 100644
index 0000000..eea07d2
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/formatTokens.js
@@ -0,0 +1,74 @@
+import LinesAndColumns from "lines-and-columns";
+
+
+import {formatTokenType} from "../parser/tokenizer/types";
+
+export default function formatTokens(code, tokens) {
+ if (tokens.length === 0) {
+ return "";
+ }
+
+ const tokenKeys = Object.keys(tokens[0]).filter(
+ (k) => k !== "type" && k !== "value" && k !== "start" && k !== "end" && k !== "loc",
+ );
+ const typeKeys = Object.keys(tokens[0].type).filter((k) => k !== "label" && k !== "keyword");
+
+ const headings = ["Location", "Label", "Raw", ...tokenKeys, ...typeKeys];
+
+ const lines = new LinesAndColumns(code);
+ const rows = [headings, ...tokens.map(getTokenComponents)];
+ const padding = headings.map(() => 0);
+ for (const components of rows) {
+ for (let i = 0; i < components.length; i++) {
+ padding[i] = Math.max(padding[i], components[i].length);
+ }
+ }
+ return rows
+ .map((components) => components.map((component, i) => component.padEnd(padding[i])).join(" "))
+ .join("\n");
+
+ function getTokenComponents(token) {
+ const raw = code.slice(token.start, token.end);
+ return [
+ formatRange(token.start, token.end),
+ formatTokenType(token.type),
+ truncate(String(raw), 14),
+ // @ts-ignore: Intentional dynamic access by key.
+ ...tokenKeys.map((key) => formatValue(token[key], key)),
+ // @ts-ignore: Intentional dynamic access by key.
+ ...typeKeys.map((key) => formatValue(token.type[key], key)),
+ ];
+ }
+
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ function formatValue(value, key) {
+ if (value === true) {
+ return key;
+ } else if (value === false || value === null) {
+ return "";
+ } else {
+ return String(value);
+ }
+ }
+
+ function formatRange(start, end) {
+ return `${formatPos(start)}-${formatPos(end)}`;
+ }
+
+ function formatPos(pos) {
+ const location = lines.locationForIndex(pos);
+ if (!location) {
+ return "Unknown";
+ } else {
+ return `${location.line + 1}:${location.column + 1}`;
+ }
+ }
+}
+
+function truncate(s, length) {
+ if (s.length > length) {
+ return `${s.slice(0, length - 3)}...`;
+ } else {
+ return s;
+ }
+}
diff --git a/node_modules/sucrase/dist/esm/util/getClassInfo.js b/node_modules/sucrase/dist/esm/util/getClassInfo.js
new file mode 100644
index 0000000..0100ad6
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/getClassInfo.js
@@ -0,0 +1,352 @@
+
+
+import {ContextualKeyword} from "../parser/tokenizer/keywords";
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
/**
 * Get information about the class fields for this class, given a token processor pointing to the
 * open-brace at the start of the class.
 *
 * Scans the whole class body (then rewinds via a snapshot), collecting:
 * - header info (expression-ness, class name, superclass presence),
 * - constructor parameter-property assignments and the initializer insert position,
 * - instance/static field initializers (claiming `__init`/`__initStatic` helper names),
 * - token ranges to delete (stray semicolons, and plain field declarations when they
 *   should produce no output).
 */
export default function getClassInfo(
  rootTransformer,
  tokens,
  nameManager,
  disableESTransforms,
) {
  // Remember the cursor so we can rewind after this read-only scan.
  const snapshot = tokens.snapshot();

  const headerInfo = processClassHeader(tokens);

  let constructorInitializerStatements = [];
  const instanceInitializerNames = [];
  const staticInitializerNames = [];
  let constructorInsertPos = null;
  const fields = [];
  const rangesToRemove = [];

  const classContextId = tokens.currentToken().contextId;
  if (classContextId == null) {
    throw new Error("Expected non-null class context ID on class open-brace.");
  }

  tokens.nextToken();
  // Walk every class element until the close-brace matching the class body.
  while (!tokens.matchesContextIdAndLabel(tt.braceR, classContextId)) {
    if (tokens.matchesContextual(ContextualKeyword._constructor) && !tokens.currentToken().isType) {
      ({constructorInitializerStatements, constructorInsertPos} = processConstructor(tokens));
    } else if (tokens.matches1(tt.semi)) {
      // Stray semicolons between members are removed (only when the class
      // transform is enabled; otherwise they are harmless and kept).
      if (!disableESTransforms) {
        rangesToRemove.push({start: tokens.currentIndex(), end: tokens.currentIndex() + 1});
      }
      tokens.nextToken();
    } else if (tokens.currentToken().isType) {
      // Type-only tokens (e.g. index signatures) need no field processing.
      tokens.nextToken();
    } else {
      // Either a method or a field. Skip to the identifier part.
      const statementStartIndex = tokens.currentIndex();
      let isStatic = false;
      let isESPrivate = false;
      let isDeclareOrAbstract = false;
      while (isAccessModifier(tokens.currentToken())) {
        if (tokens.matches1(tt._static)) {
          isStatic = true;
        }
        if (tokens.matches1(tt.hash)) {
          isESPrivate = true;
        }
        if (tokens.matches1(tt._declare) || tokens.matches1(tt._abstract)) {
          isDeclareOrAbstract = true;
        }
        tokens.nextToken();
      }
      if (isStatic && tokens.matches1(tt.braceL)) {
        // This is a static block, so don't process it in any special way.
        skipToNextClassElement(tokens, classContextId);
        continue;
      }
      if (isESPrivate) {
        // Sucrase doesn't attempt to transpile private fields; just leave them as-is.
        skipToNextClassElement(tokens, classContextId);
        continue;
      }
      // A constructor preceded by modifiers (e.g. `public constructor`) lands here
      // rather than in the first branch above.
      if (
        tokens.matchesContextual(ContextualKeyword._constructor) &&
        !tokens.currentToken().isType
      ) {
        ({constructorInitializerStatements, constructorInsertPos} = processConstructor(tokens));
        continue;
      }

      const nameStartIndex = tokens.currentIndex();
      skipFieldName(tokens);
      if (tokens.matches1(tt.lessThan) || tokens.matches1(tt.parenL)) {
        // This is a method, so nothing to process.
        skipToNextClassElement(tokens, classContextId);
        continue;
      }
      // There might be a type annotation that we need to skip.
      while (tokens.currentToken().isType) {
        tokens.nextToken();
      }
      if (tokens.matches1(tt.eq)) {
        const equalsIndex = tokens.currentIndex();
        // This is an initializer, so we need to wrap in an initializer method.
        const valueEnd = tokens.currentToken().rhsEndIndex;
        if (valueEnd == null) {
          throw new Error("Expected rhsEndIndex on class field assignment.");
        }
        tokens.nextToken();
        // Let the root transformer rewrite the initializer expression itself.
        while (tokens.currentIndex() < valueEnd) {
          rootTransformer.processToken();
        }
        let initializerName;
        if (isStatic) {
          initializerName = nameManager.claimFreeName("__initStatic");
          staticInitializerNames.push(initializerName);
        } else {
          initializerName = nameManager.claimFreeName("__init");
          instanceInitializerNames.push(initializerName);
        }
        // Fields start at the name, so `static x = 1;` has a field range of `x = 1;`.
        fields.push({
          initializerName,
          equalsIndex,
          start: nameStartIndex,
          end: tokens.currentIndex(),
        });
      } else if (!disableESTransforms || isDeclareOrAbstract) {
        // This is a regular field declaration, like `x;`. With the class transform enabled, we just
        // remove the line so that no output is produced. With the class transform disabled, we
        // usually want to preserve the declaration (but still strip types), but if the `declare`
        // or `abstract` keyword is specified, we should remove the line to avoid initializing the
        // value to undefined.
        rangesToRemove.push({start: statementStartIndex, end: tokens.currentIndex()});
      }
    }
  }

  tokens.restoreToSnapshot(snapshot);
  if (disableESTransforms) {
    // With ES transforms disabled, we don't want to transform regular class
    // field declarations, and we don't need to do any additional tricks to
    // reference the constructor for static init, but we still need to transform
    // TypeScript field initializers defined as constructor parameters and we
    // still need to remove `declare` fields. For now, we run the same code
    // path but omit any field information, as if the class had no field
    // declarations. In the future, when we fully drop the class fields
    // transform, we can simplify this code significantly.
    return {
      headerInfo,
      constructorInitializerStatements,
      instanceInitializerNames: [],
      staticInitializerNames: [],
      constructorInsertPos,
      fields: [],
      rangesToRemove,
    };
  } else {
    return {
      headerInfo,
      constructorInitializerStatements,
      instanceInitializerNames,
      staticInitializerNames,
      constructorInsertPos,
      fields,
      rangesToRemove,
    };
  }
}
+
/**
 * Advance the token processor to the start of the next method/field in the
 * class.
 *
 * Seek forward until we land on a token carrying the class's context ID (an
 * element boundary or the class close-brace), then back up over any access
 * modifiers so the cursor rests on the first token of that element.
 */
function skipToNextClassElement(tokens, classContextId) {
  do {
    tokens.nextToken();
  } while (tokens.currentToken().contextId !== classContextId);
  for (;;) {
    if (!isAccessModifier(tokens.tokenAtRelativeIndex(-1))) {
      break;
    }
    tokens.previousToken();
  }
}
+
/**
 * Read the class header starting from the `class` token, leaving the cursor
 * on the open-brace of the class body.
 *
 * Returns whether the class appears in expression position, its name (null
 * when anonymous), and whether it has a real (non-type) superclass.
 */
function processClassHeader(tokens) {
  const classToken = tokens.currentToken();
  const {contextId, isExpression} = classToken;
  if (contextId == null) {
    throw new Error("Expected context ID on class token.");
  }
  if (isExpression == null) {
    throw new Error("Expected isExpression on class token.");
  }
  tokens.nextToken();
  // An identifier right after `class` is the class name; anonymous classes
  // (e.g. `export default class {`) go straight to heritage clauses/body.
  const className = tokens.matches1(tt.name) ? tokens.identifierName() : null;
  let hasSuperclass = false;
  // Walk to the class body's open-brace (matched via context ID). A
  // value-level `extends` means a real superclass; `extends` within type
  // parameters (`class A<B extends C>`) is marked isType and does not count,
  // nor do `implements` clauses, which consist only of type tokens.
  while (!tokens.matchesContextIdAndLabel(tt.braceL, contextId)) {
    if (tokens.matches1(tt._extends) && !tokens.currentToken().isType) {
      hasSuperclass = true;
    }
    tokens.nextToken();
  }
  return {isExpression, className, hasSuperclass};
}
+
/**
 * Extract useful information out of a constructor, starting at the "constructor" name.
 *
 * Returns the `this.x = x` assignment statements implied by TypeScript
 * parameter properties, plus the token index where initializer calls should
 * be inserted: the close-paren of the `super(...)` call if one is found,
 * otherwise the position just after the parameter list (the body open-brace).
 */
function processConstructor(tokens)


 {
  const constructorInitializerStatements = [];

  tokens.nextToken();
  const constructorContextId = tokens.currentToken().contextId;
  if (constructorContextId == null) {
    throw new Error("Expected context ID on open-paren starting constructor params.");
  }
  // Advance through parameters looking for access modifiers.
  while (!tokens.matchesContextIdAndLabel(tt.parenR, constructorContextId)) {
    if (tokens.currentToken().contextId === constructorContextId) {
      // Current token is an open paren or comma just before a param, so check
      // that param for access modifiers.
      tokens.nextToken();
      if (isAccessModifier(tokens.currentToken())) {
        tokens.nextToken();
        while (isAccessModifier(tokens.currentToken())) {
          tokens.nextToken();
        }
        const token = tokens.currentToken();
        if (token.type !== tt.name) {
          throw new Error("Expected identifier after access modifiers in constructor arg.");
        }
        // A modifier on a constructor param is a TS parameter property, which
        // implies an assignment of the param onto `this`.
        const name = tokens.identifierNameForToken(token);
        constructorInitializerStatements.push(`this.${name} = ${name}`);
      }
    } else {
      tokens.nextToken();
    }
  }
  // )
  tokens.nextToken();
  // Constructor type annotations are invalid, but skip them anyway since
  // they're easy to skip.
  while (tokens.currentToken().isType) {
    tokens.nextToken();
  }
  let constructorInsertPos = tokens.currentIndex();

  // Advance through body looking for a super call.
  let foundSuperCall = false;
  while (!tokens.matchesContextIdAndLabel(tt.braceR, constructorContextId)) {
    if (!foundSuperCall && tokens.matches2(tt._super, tt.parenL)) {
      tokens.nextToken();
      const superCallContextId = tokens.currentToken().contextId;
      if (superCallContextId == null) {
        throw new Error("Expected a context ID on the super call");
      }
      while (!tokens.matchesContextIdAndLabel(tt.parenR, superCallContextId)) {
        tokens.nextToken();
      }
      // Field initializers must run after super(), so insert there instead.
      constructorInsertPos = tokens.currentIndex();
      foundSuperCall = true;
    }
    tokens.nextToken();
  }
  // }
  tokens.nextToken();

  return {constructorInitializerStatements, constructorInsertPos};
}
+
/**
 * Determine if this is any token that can go before the name in a method/field.
 */
function isAccessModifier(token) {
  switch (token.type) {
    case tt._async:
    case tt._get:
    case tt._set:
    case tt.plus:
    case tt.minus:
    case tt._readonly:
    case tt._static:
    case tt._public:
    case tt._private:
    case tt._protected:
    case tt._override:
    case tt._abstract:
    case tt.star:
    case tt._declare:
    case tt.hash:
      return true;
    default:
      return false;
  }
}
+
/**
 * Skip past a method/field name: either a single token (identifier, etc.) or
 * a computed name in square brackets, consumed up to and including the
 * matching close-bracket (identified by the class context ID).
 */
function skipFieldName(tokens) {
  if (!tokens.matches1(tt.bracketL)) {
    // Plain (non-computed) name: a single token.
    tokens.nextToken();
    return;
  }
  const classContextId = tokens.currentToken().contextId;
  if (classContextId == null) {
    throw new Error("Expected class context ID on computed name open bracket.");
  }
  do {
    tokens.nextToken();
  } while (!tokens.matchesContextIdAndLabel(tt.bracketR, classContextId));
  tokens.nextToken();
}
diff --git a/node_modules/sucrase/dist/esm/util/getDeclarationInfo.js b/node_modules/sucrase/dist/esm/util/getDeclarationInfo.js
new file mode 100644
index 0000000..ade9a81
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/getDeclarationInfo.js
@@ -0,0 +1,40 @@
+import {isTopLevelDeclaration} from "../parser/tokenizer";
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
+
+
+
+
+
// Shared "no declarations" result for callers that skip the declaration scan.
// NOTE(review): this object and its Sets are mutable and shared between all
// users — callers are expected to treat it as read-only; confirm before adding
// any code path that mutates a DeclarationInfo.
export const EMPTY_DECLARATION_INFO = {
  typeDeclarations: new Set(),
  valueDeclarations: new Set(),
};
+
/**
 * Get all top-level identifiers that should be preserved when exported in TypeScript.
 *
 * Examples:
 * - If an identifier is declared as `const x`, then `export {x}` should be preserved.
 * - If it's declared as `type x`, then `export {x}` should be removed.
 * - If it's declared as both `const x` and `type x`, then the export should be preserved.
 * - Classes and enums should be preserved (even though they also introduce types).
 * - Imported identifiers should be preserved since we don't have enough information to
 *   rule them out. --isolatedModules disallows re-exports, which catches errors here.
 */
export default function getDeclarationInfo(tokens) {
  const typeDeclarations = new Set();
  const valueDeclarations = new Set();
  for (const token of tokens.tokens) {
    if (token.type !== tt.name || !isTopLevelDeclaration(token)) {
      continue;
    }
    const name = tokens.identifierNameForToken(token);
    (token.isType ? typeDeclarations : valueDeclarations).add(name);
  }
  return {typeDeclarations, valueDeclarations};
}
diff --git a/node_modules/sucrase/dist/esm/util/getIdentifierNames.js b/node_modules/sucrase/dist/esm/util/getIdentifierNames.js
new file mode 100644
index 0000000..5b85901
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/getIdentifierNames.js
@@ -0,0 +1,15 @@
+
+import {TokenType as tt} from "../parser/tokenizer/types";
+
/**
 * Get all identifier names in the code, in order, including duplicates.
 */
export default function getIdentifierNames(code, tokens) {
  const names = [];
  for (const {type, start, end} of tokens) {
    if (type === tt.name) {
      names.push(code.slice(start, end));
    }
  }
  return names;
}
diff --git a/node_modules/sucrase/dist/esm/util/getImportExportSpecifierInfo.js b/node_modules/sucrase/dist/esm/util/getImportExportSpecifierInfo.js
new file mode 100644
index 0000000..3dc6d2c
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/getImportExportSpecifierInfo.js
@@ -0,0 +1,92 @@
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
/**
 * Determine information about this named import or named export specifier.
 *
 * This syntax is the `a` from statements like these:
 *   import {A} from "./foo";
 *   export {A};
 *   export {A} from "./foo";
 *
 * The meaning is fully determined by how many tokens the specifier spans,
 * which is 1 to 4:
 *   {A}            (1)
 *   {type A}       (2)
 *   {A as B}       (3)
 *   {type A as B}  (4)
 *
 * In the type cases, the names are never needed in practice, so we don't
 * compute them.
 *
 * TODO: There's some redundancy with the type detection here and the isType
 * flag that's already present on tokens in TS mode. This function could
 * potentially be simplified and/or pushed to the call sites to avoid the
 * object allocation.
 */
export default function getImportExportSpecifierInfo(
  tokens,
  index = tokens.currentIndex(),
) {
  // Count tokens until the terminating `,` or `}`; the count (1-4) tells us
  // which specifier form we have.
  for (let numTokens = 1; numTokens <= 4; numTokens++) {
    const endIndex = index + numTokens;
    if (!isSpecifierEnd(tokens, endIndex)) {
      continue;
    }
    switch (numTokens) {
      case 1: {
        // import {A}
        const name = tokens.identifierNameAtIndex(index);
        return {isType: false, leftName: name, rightName: name, endIndex};
      }
      case 3:
        // import {A as B}
        return {
          isType: false,
          leftName: tokens.identifierNameAtIndex(index),
          rightName: tokens.identifierNameAtIndex(index + 2),
          endIndex,
        };
      default:
        // 2 tokens ({type A}) or 4 tokens ({type A as B}): type-only.
        return {isType: true, leftName: null, rightName: null, endIndex};
    }
  }
  throw new Error(`Unexpected import/export specifier at ${index}`);
}

// A specifier ends at the clause-closing brace or the comma before the next
// specifier.
function isSpecifierEnd(tokens, index) {
  const {type} = tokens.tokens[index];
  return type === tt.braceR || type === tt.comma;
}
diff --git a/node_modules/sucrase/dist/esm/util/getJSXPragmaInfo.js b/node_modules/sucrase/dist/esm/util/getJSXPragmaInfo.js
new file mode 100644
index 0000000..9972342
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/getJSXPragmaInfo.js
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+
+
+
/**
 * Resolve the JSX pragma options into a base identifier and a member-access
 * suffix, for both element creation and fragments.
 *
 * e.g. "React.createElement" -> base "React", suffix ".createElement";
 * a pragma without a dot (e.g. "h") has an empty suffix.
 */
export default function getJSXPragmaInfo(options) {
  const pragma = options.jsxPragma || "React.createElement";
  const fragmentPragma = options.jsxFragmentPragma || "React.Fragment";
  const [base, suffix] = splitPragma(pragma);
  const [fragmentBase, fragmentSuffix] = splitPragma(fragmentPragma);
  return {base, suffix, fragmentBase, fragmentSuffix};
}

// Split at the first dot: "a.b.c" -> ["a", ".b.c"]; no dot -> [pragma, ""].
function splitPragma(pragma) {
  const dotIndex = pragma.indexOf(".");
  if (dotIndex === -1) {
    return [pragma, ""];
  }
  return [pragma.slice(0, dotIndex), pragma.slice(dotIndex)];
}
diff --git a/node_modules/sucrase/dist/esm/util/getNonTypeIdentifiers.js b/node_modules/sucrase/dist/esm/util/getNonTypeIdentifiers.js
new file mode 100644
index 0000000..24c73dd
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/getNonTypeIdentifiers.js
@@ -0,0 +1,43 @@
+
+import {IdentifierRole} from "../parser/tokenizer";
+import {TokenType, TokenType as tt} from "../parser/tokenizer/types";
+
+import {startsWithLowerCase} from "../transformers/JSXTransformer";
+import getJSXPragmaInfo from "./getJSXPragmaInfo";
+
/**
 * Collect every identifier that is referenced at runtime (not type-only):
 * value-position name accesses, the JSX pragma bases implied by JSX syntax,
 * and capitalized / member-access JSX tag names.
 */
export function getNonTypeIdentifiers(tokens, options) {
  const jsxPragmaInfo = getJSXPragmaInfo(options);
  const nonTypeIdentifiers = new Set();
  const allTokens = tokens.tokens;
  for (let i = 0; i < allTokens.length; i++) {
    const token = allTokens[i];
    const isRuntimeAccess =
      token.identifierRole === IdentifierRole.Access ||
      token.identifierRole === IdentifierRole.ObjectShorthand ||
      token.identifierRole === IdentifierRole.ExportAccess;
    if (token.type === tt.name && !token.isType && isRuntimeAccess && !token.shadowsGlobal) {
      nonTypeIdentifiers.add(tokens.identifierNameForToken(token));
    }
    if (token.type === tt.jsxTagStart) {
      // Any JSX element implies a call to the element-creation pragma.
      nonTypeIdentifiers.add(jsxPragmaInfo.base);
      // `<>` (a fragment) additionally references the fragment pragma.
      if (i + 1 < allTokens.length && allTokens[i + 1].type === tt.jsxTagEnd) {
        nonTypeIdentifiers.add(jsxPragmaInfo.fragmentBase);
      }
    }
    if (token.type === tt.jsxName && token.identifierRole === IdentifierRole.Access) {
      const identifierName = tokens.identifierNameForToken(token);
      // Lower-case single-component tag names like "div" are intrinsic
      // elements, not identifier references.
      if (!startsWithLowerCase(identifierName) || allTokens[i + 1].type === TokenType.dot) {
        nonTypeIdentifiers.add(identifierName);
      }
    }
  }
  return nonTypeIdentifiers;
}
diff --git a/node_modules/sucrase/dist/esm/util/getTSImportedNames.js b/node_modules/sucrase/dist/esm/util/getTSImportedNames.js
new file mode 100644
index 0000000..523181a
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/getTSImportedNames.js
@@ -0,0 +1,84 @@
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+import getImportExportSpecifierInfo from "./getImportExportSpecifierInfo";
+
/**
 * Special case code to scan for imported names in ESM TypeScript. We need to do this so we can
 * properly get globals so we can compute shadowed globals.
 *
 * This is similar to logic in CJSImportProcessor, but trimmed down to avoid logic with CJS
 * replacement and flow type imports.
 */
export default function getTSImportedNames(tokens) {
  const importedNames = new Set();
  for (let i = 0; i < tokens.tokens.length; i++) {
    const isImportKeyword = tokens.matches1AtIndex(i, tt._import);
    // `import foo = ...` is a TS import-equals statement, not an ESM import.
    if (isImportKeyword && !tokens.matches3AtIndex(i, tt._import, tt.name, tt.eq)) {
      collectNamesForImport(tokens, i, importedNames);
    }
  }
  return importedNames;
}

function collectNamesForImport(tokens, index, importedNames) {
  index++;

  // `import(...)` is a dynamic import; it binds no names.
  if (tokens.matches1AtIndex(index, tt.parenL)) {
    return;
  }

  // Default import: `import A` (possibly followed by `, {...}` or `, * as ns`).
  if (tokens.matches1AtIndex(index, tt.name)) {
    importedNames.add(tokens.identifierNameAtIndex(index));
    index++;
    if (tokens.matches1AtIndex(index, tt.comma)) {
      index++;
    }
  }

  // Namespace import: `* as ns`.
  if (tokens.matches1AtIndex(index, tt.star)) {
    index += 2;
    importedNames.add(tokens.identifierNameAtIndex(index));
    index++;
  }

  // Named imports: `{A, type B, C as D, ...}`.
  if (tokens.matches1AtIndex(index, tt.braceL)) {
    collectNamesForNamedImport(tokens, index + 1, importedNames);
  }
}

function collectNamesForNamedImport(tokens, index, importedNames) {
  for (;;) {
    if (tokens.matches1AtIndex(index, tt.braceR)) {
      return;
    }

    const specifierInfo = getImportExportSpecifierInfo(tokens, index);
    index = specifierInfo.endIndex;
    // Type-only specifiers (`type A`) bind no runtime name.
    if (!specifierInfo.isType) {
      importedNames.add(specifierInfo.rightName);
    }

    if (tokens.matches2AtIndex(index, tt.comma, tt.braceR)) {
      return;
    }
    if (tokens.matches1AtIndex(index, tt.braceR)) {
      return;
    }
    if (tokens.matches1AtIndex(index, tt.comma)) {
      index++;
    } else {
      throw new Error(`Unexpected token: ${JSON.stringify(tokens.tokens[index])}`);
    }
  }
}
diff --git a/node_modules/sucrase/dist/esm/util/isAsyncOperation.js b/node_modules/sucrase/dist/esm/util/isAsyncOperation.js
new file mode 100644
index 0000000..af40e9a
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/isAsyncOperation.js
@@ -0,0 +1,38 @@
+import {ContextualKeyword} from "../parser/tokenizer/keywords";
+
+
/**
 * Determine whether this optional chain or nullish coalescing operation has any await statements in
 * it. If so, we'll need to transpile to an async operation.
 *
 * We walk the span of the operation (tracking nesting via the optional-chain
 * and nullish-coalesce markers on each token) and return true on the first
 * `await` used as a real await — i.e. not an object key or property access
 * (those carry an identifierRole) and at the same scope depth as the start of
 * the operation, so an `await` inside a nested function does not count.
 */
export default function isAsyncOperation(tokens) {
  const startToken = tokens.currentToken();
  let depth = 0;
  let index = tokens.currentIndex();
  do {
    const token = tokens.tokens[index];
    if (token.isOptionalChainStart) {
      depth++;
    }
    if (token.isOptionalChainEnd) {
      depth--;
    }
    depth += token.numNullishCoalesceStarts - token.numNullishCoalesceEnds;

    const isRealAwait =
      token.contextualKeyword === ContextualKeyword._await &&
      token.identifierRole == null &&
      token.scopeDepth === startToken.scopeDepth;
    if (isRealAwait) {
      return true;
    }
    index++;
  } while (depth > 0 && index < tokens.tokens.length);
  return false;
}
diff --git a/node_modules/sucrase/dist/esm/util/isExportFrom.js b/node_modules/sucrase/dist/esm/util/isExportFrom.js
new file mode 100644
index 0000000..fd33665
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/isExportFrom.js
@@ -0,0 +1,18 @@
+import {ContextualKeyword} from "../parser/tokenizer/keywords";
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
/**
 * Starting at `export {`, look ahead and return `true` for an
 * `export {...} from "..."` re-export and `false` for a plain multi-export.
 */
export default function isExportFrom(tokens) {
  // Scan forward to the `}` that closes the export clause.
  let index = tokens.currentIndex();
  while (!tokens.matches1AtIndex(index, tt.braceR)) {
    index++;
  }
  // A re-export is followed by the contextual keyword `from`, then a module
  // specifier string.
  const hasFrom = tokens.matchesContextualAtIndex(index + 1, ContextualKeyword._from);
  return hasFrom && tokens.matches1AtIndex(index + 2, tt.string);
}
diff --git a/node_modules/sucrase/dist/esm/util/isIdentifier.js b/node_modules/sucrase/dist/esm/util/isIdentifier.js
new file mode 100644
index 0000000..4a62ff6
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/isIdentifier.js
@@ -0,0 +1,81 @@
+import {IS_IDENTIFIER_CHAR, IS_IDENTIFIER_START} from "../parser/util/identifier";
+
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar
// Hard-code the reserved words rather than reusing the parser's keyword or
// contextual-keyword tables, since those have various exceptions (e.g.
// `package` is reserved but unused, and several contextual keywords are
// reserved). All code compiled by Sucrase is assumed to be a module, so
// strict-mode-only words and `await` are treated as reserved too.
const RESERVED_WORDS = new Set([
  // Reserved keywords as of ECMAScript 2015
  "break", "case", "catch", "class", "const", "continue", "debugger",
  "default", "delete", "do", "else", "export", "extends", "finally", "for",
  "function", "if", "import", "in", "instanceof", "new", "return", "super",
  "switch", "this", "throw", "try", "typeof", "var", "void", "while", "with",
  "yield",
  // Future reserved keywords
  "enum", "implements", "interface", "let", "package", "private", "protected",
  "public", "static", "await",
  // Literals that cannot be used as identifiers
  "false", "null", "true",
]);

/**
 * Determine if the given name is a legal variable name.
 *
 * This is needed when transforming TypeScript enums; if an enum key is a valid
 * variable name, it might be referenced later in the enum, so we need to
 * declare a variable.
 */
export default function isIdentifier(name) {
  if (name.length === 0 || RESERVED_WORDS.has(name)) {
    return false;
  }
  if (!IS_IDENTIFIER_START[name.charCodeAt(0)]) {
    return false;
  }
  for (let i = 1; i < name.length; i++) {
    if (!IS_IDENTIFIER_CHAR[name.charCodeAt(i)]) {
      return false;
    }
  }
  return true;
}
diff --git a/node_modules/sucrase/dist/esm/util/removeMaybeImportAttributes.js b/node_modules/sucrase/dist/esm/util/removeMaybeImportAttributes.js
new file mode 100644
index 0000000..abced0e
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/removeMaybeImportAttributes.js
@@ -0,0 +1,22 @@
+import {ContextualKeyword} from "../parser/tokenizer/keywords";
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
/**
 * Starting at a potential `with` or (legacy) `assert` token, remove the import
 * attributes clause (`with {...}` / `assert {...}`) if one is present.
 */
export function removeMaybeImportAttributes(tokens) {
  const hasAttributes =
    tokens.matches2(tt._with, tt.braceL) ||
    (tokens.matches2(tt.name, tt.braceL) && tokens.matchesContextual(ContextualKeyword._assert));
  if (!hasAttributes) {
    return;
  }
  // `with` / `assert` keyword
  tokens.removeToken();
  // {
  tokens.removeToken();
  tokens.removeBalancedCode();
  // }
  tokens.removeToken();
}
diff --git a/node_modules/sucrase/dist/esm/util/shouldElideDefaultExport.js b/node_modules/sucrase/dist/esm/util/shouldElideDefaultExport.js
new file mode 100644
index 0000000..d3c4693
--- /dev/null
+++ b/node_modules/sucrase/dist/esm/util/shouldElideDefaultExport.js
@@ -0,0 +1,38 @@
+import {TokenType as tt} from "../parser/tokenizer/types";
+
+
+
/**
 * Decide whether `export default <identifier>` should be removed because the
 * identifier is declared purely as a type. Shared by the CJS and ESM
 * transforms, whose logic is identical here.
 */
export default function shouldElideDefaultExport(
  isTypeScriptTransformEnabled,
  keepUnusedImports,
  tokens,
  declarationInfo,
) {
  if (!isTypeScriptTransformEnabled || keepUnusedImports) {
    return false;
  }
  const exportToken = tokens.currentToken();
  if (exportToken.rhsEndIndex == null) {
    throw new Error("Expected non-null rhsEndIndex on export token.");
  }
  // Only `export default a` (3 tokens) or `export default a;` (4 tokens
  // ending in a semicolon) are candidates for elision.
  const numTokens = exportToken.rhsEndIndex - tokens.currentIndex();
  const isBareIdentifierExport =
    numTokens === 3 ||
    (numTokens === 4 && tokens.matches1AtIndex(exportToken.rhsEndIndex - 1, tt.semi));
  if (!isBareIdentifierExport) {
    return false;
  }
  const identifierToken = tokens.tokenAtRelativeIndex(2);
  if (identifierToken.type !== tt.name) {
    return false;
  }
  // Elide only when the exported name is declared as a type and never as a
  // value.
  const exportedName = tokens.identifierNameForToken(identifierToken);
  return (
    declarationInfo.typeDeclarations.has(exportedName) &&
    !declarationInfo.valueDeclarations.has(exportedName)
  );
}