Edit on GitHub

sqlglot.dialects.postgres

  1from __future__ import annotations
  2
  3from sqlglot import exp, tokens
  4from sqlglot.dialects.dialect import Dialect
  5from sqlglot.generators.postgres import PostgresGenerator
  6from sqlglot.parsers.postgres import PostgresParser
  7from sqlglot.tokens import TokenType
  8
  9
 10class Postgres(Dialect):
 11    INDEX_OFFSET = 1
 12    TYPED_DIVISION = True
 13    CONCAT_COALESCE = True
 14    NULL_ORDERING = "nulls_are_large"
 15    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 16    TABLESAMPLE_SIZE_IS_PERCENT = True
 17    TABLES_REFERENCEABLE_AS_COLUMNS = True
 18
 19    DEFAULT_FUNCTIONS_COLUMN_NAMES = {
 20        exp.ExplodingGenerateSeries: "generate_series",
 21    }
 22
 23    TIME_MAPPING = {
 24        "d": "%u",  # 1-based day of week
 25        "D": "%u",  # 1-based day of week
 26        "dd": "%d",  # day of month
 27        "DD": "%d",  # day of month
 28        "ddd": "%j",  # zero padded day of year
 29        "DDD": "%j",  # zero padded day of year
 30        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
 31        "FMDDD": "%-j",  # day of year
 32        "FMHH12": "%-I",  # 9
 33        "FMHH24": "%-H",  # 9
 34        "FMMI": "%-M",  # Minute
 35        "FMMM": "%-m",  # 1
 36        "FMSS": "%-S",  # Second
 37        "HH12": "%I",  # 09
 38        "HH24": "%H",  # 09
 39        "mi": "%M",  # zero padded minute
 40        "MI": "%M",  # zero padded minute
 41        "mm": "%m",  # 01
 42        "MM": "%m",  # 01
 43        "OF": "%z",  # utc offset
 44        "ss": "%S",  # zero padded second
 45        "SS": "%S",  # zero padded second
 46        "TMDay": "%A",  # TM is locale dependent
 47        "TMDy": "%a",
 48        "TMMon": "%b",  # Sep
 49        "TMMonth": "%B",  # September
 50        "TZ": "%Z",  # uppercase timezone name
 51        "US": "%f",  # zero padded microsecond
 52        "ww": "%U",  # 1-based week of year
 53        "WW": "%U",  # 1-based week of year
 54        "yy": "%y",  # 15
 55        "YY": "%y",  # 15
 56        "yyyy": "%Y",  # 2015
 57        "YYYY": "%Y",  # 2015
 58    }
 59
 60    class Tokenizer(tokens.Tokenizer):
 61        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
 62        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
 63        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
 64        BYTE_STRING_ESCAPES = ["'", "\\"]
 65        HEREDOC_STRINGS = ["$"]
 66
 67        HEREDOC_TAG_IS_IDENTIFIER = True
 68        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
 69
 70        KEYWORDS = {
 71            **tokens.Tokenizer.KEYWORDS,
 72            "~": TokenType.RLIKE,
 73            "@@": TokenType.DAT,
 74            "@>": TokenType.AT_GT,
 75            "<@": TokenType.LT_AT,
 76            "?&": TokenType.QMARK_AMP,
 77            "?|": TokenType.QMARK_PIPE,
 78            "#-": TokenType.HASH_DASH,
 79            "|/": TokenType.PIPE_SLASH,
 80            "||/": TokenType.DPIPE_SLASH,
 81            "BEGIN": TokenType.BEGIN,
 82            "BIGSERIAL": TokenType.BIGSERIAL,
 83            "CSTRING": TokenType.PSEUDO_TYPE,
 84            "DECLARE": TokenType.COMMAND,
 85            "DO": TokenType.COMMAND,
 86            "EXEC": TokenType.COMMAND,
 87            "HSTORE": TokenType.HSTORE,
 88            "INT8": TokenType.BIGINT,
 89            "MONEY": TokenType.MONEY,
 90            "NAME": TokenType.NAME,
 91            "OID": TokenType.OBJECT_IDENTIFIER,
 92            "ONLY": TokenType.ONLY,
 93            "POINT": TokenType.POINT,
 94            "REFRESH": TokenType.COMMAND,
 95            "REINDEX": TokenType.COMMAND,
 96            "RESET": TokenType.COMMAND,
 97            "SERIAL": TokenType.SERIAL,
 98            "SMALLSERIAL": TokenType.SMALLSERIAL,
 99            "TEMP": TokenType.TEMPORARY,
100            "REGCLASS": TokenType.OBJECT_IDENTIFIER,
101            "REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
102            "REGCONFIG": TokenType.OBJECT_IDENTIFIER,
103            "REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
104            "REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
105            "REGOPER": TokenType.OBJECT_IDENTIFIER,
106            "REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
107            "REGPROC": TokenType.OBJECT_IDENTIFIER,
108            "REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
109            "REGROLE": TokenType.OBJECT_IDENTIFIER,
110            "REGTYPE": TokenType.OBJECT_IDENTIFIER,
111            "FLOAT": TokenType.DOUBLE,
112            "XML": TokenType.XML,
113            "VARIADIC": TokenType.VARIADIC,
114            "INOUT": TokenType.INOUT,
115        }
116        KEYWORDS.pop("/*+")
117        KEYWORDS.pop("DIV")
118
119        SINGLE_TOKENS = {
120            **tokens.Tokenizer.SINGLE_TOKENS,
121            "$": TokenType.HEREDOC_STRING,
122        }
123
124        VAR_SINGLE_TOKENS = {"$"}
125
126    Parser = PostgresParser
127
128    Generator = PostgresGenerator
class Postgres(sqlglot.dialects.dialect.Dialect):
 11class Postgres(Dialect):
 12    INDEX_OFFSET = 1
 13    TYPED_DIVISION = True
 14    CONCAT_COALESCE = True
 15    NULL_ORDERING = "nulls_are_large"
 16    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 17    TABLESAMPLE_SIZE_IS_PERCENT = True
 18    TABLES_REFERENCEABLE_AS_COLUMNS = True
 19
 20    DEFAULT_FUNCTIONS_COLUMN_NAMES = {
 21        exp.ExplodingGenerateSeries: "generate_series",
 22    }
 23
 24    TIME_MAPPING = {
 25        "d": "%u",  # 1-based day of week
 26        "D": "%u",  # 1-based day of week
 27        "dd": "%d",  # day of month
 28        "DD": "%d",  # day of month
 29        "ddd": "%j",  # zero padded day of year
 30        "DDD": "%j",  # zero padded day of year
 31        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
 32        "FMDDD": "%-j",  # day of year
 33        "FMHH12": "%-I",  # 9
 34        "FMHH24": "%-H",  # 9
 35        "FMMI": "%-M",  # Minute
 36        "FMMM": "%-m",  # 1
 37        "FMSS": "%-S",  # Second
 38        "HH12": "%I",  # 09
 39        "HH24": "%H",  # 09
 40        "mi": "%M",  # zero padded minute
 41        "MI": "%M",  # zero padded minute
 42        "mm": "%m",  # 01
 43        "MM": "%m",  # 01
 44        "OF": "%z",  # utc offset
 45        "ss": "%S",  # zero padded second
 46        "SS": "%S",  # zero padded second
 47        "TMDay": "%A",  # TM is locale dependent
 48        "TMDy": "%a",
 49        "TMMon": "%b",  # Sep
 50        "TMMonth": "%B",  # September
 51        "TZ": "%Z",  # uppercase timezone name
 52        "US": "%f",  # zero padded microsecond
 53        "ww": "%U",  # 1-based week of year
 54        "WW": "%U",  # 1-based week of year
 55        "yy": "%y",  # 15
 56        "YY": "%y",  # 15
 57        "yyyy": "%Y",  # 2015
 58        "YYYY": "%Y",  # 2015
 59    }
 60
 61    class Tokenizer(tokens.Tokenizer):
 62        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
 63        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
 64        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
 65        BYTE_STRING_ESCAPES = ["'", "\\"]
 66        HEREDOC_STRINGS = ["$"]
 67
 68        HEREDOC_TAG_IS_IDENTIFIER = True
 69        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
 70
 71        KEYWORDS = {
 72            **tokens.Tokenizer.KEYWORDS,
 73            "~": TokenType.RLIKE,
 74            "@@": TokenType.DAT,
 75            "@>": TokenType.AT_GT,
 76            "<@": TokenType.LT_AT,
 77            "?&": TokenType.QMARK_AMP,
 78            "?|": TokenType.QMARK_PIPE,
 79            "#-": TokenType.HASH_DASH,
 80            "|/": TokenType.PIPE_SLASH,
 81            "||/": TokenType.DPIPE_SLASH,
 82            "BEGIN": TokenType.BEGIN,
 83            "BIGSERIAL": TokenType.BIGSERIAL,
 84            "CSTRING": TokenType.PSEUDO_TYPE,
 85            "DECLARE": TokenType.COMMAND,
 86            "DO": TokenType.COMMAND,
 87            "EXEC": TokenType.COMMAND,
 88            "HSTORE": TokenType.HSTORE,
 89            "INT8": TokenType.BIGINT,
 90            "MONEY": TokenType.MONEY,
 91            "NAME": TokenType.NAME,
 92            "OID": TokenType.OBJECT_IDENTIFIER,
 93            "ONLY": TokenType.ONLY,
 94            "POINT": TokenType.POINT,
 95            "REFRESH": TokenType.COMMAND,
 96            "REINDEX": TokenType.COMMAND,
 97            "RESET": TokenType.COMMAND,
 98            "SERIAL": TokenType.SERIAL,
 99            "SMALLSERIAL": TokenType.SMALLSERIAL,
100            "TEMP": TokenType.TEMPORARY,
101            "REGCLASS": TokenType.OBJECT_IDENTIFIER,
102            "REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
103            "REGCONFIG": TokenType.OBJECT_IDENTIFIER,
104            "REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
105            "REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
106            "REGOPER": TokenType.OBJECT_IDENTIFIER,
107            "REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
108            "REGPROC": TokenType.OBJECT_IDENTIFIER,
109            "REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
110            "REGROLE": TokenType.OBJECT_IDENTIFIER,
111            "REGTYPE": TokenType.OBJECT_IDENTIFIER,
112            "FLOAT": TokenType.DOUBLE,
113            "XML": TokenType.XML,
114            "VARIADIC": TokenType.VARIADIC,
115            "INOUT": TokenType.INOUT,
116        }
117        KEYWORDS.pop("/*+")
118        KEYWORDS.pop("DIV")
119
120        SINGLE_TOKENS = {
121            **tokens.Tokenizer.SINGLE_TOKENS,
122            "$": TokenType.HEREDOC_STRING,
123        }
124
125        VAR_SINGLE_TOKENS = {"$"}
126
127    Parser = PostgresParser
128
129    Generator = PostgresGenerator
INDEX_OFFSET = 1

The base index offset for arrays.

TYPED_DIVISION = True

Whether the behavior of a / b depends on the types of a and b. False means a / b is always float division. True means a / b is integer division if both a and b are integers.

CONCAT_COALESCE = True

A NULL arg in CONCAT yields NULL by default, but in some dialects it yields an empty string.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents a percentage.

TABLES_REFERENCEABLE_AS_COLUMNS = True

Whether table names can be referenced as columns (treated as structs).

BigQuery allows tables to be referenced as columns in queries, automatically treating them as struct values containing all the table's columns.

For example, in BigQuery: SELECT t FROM my_table AS t -- Returns entire row as a struct

DEFAULT_FUNCTIONS_COLUMN_NAMES: dict[type[sqlglot.expressions.core.Func], str | tuple[str, ...]] = {<class 'sqlglot.expressions.array.ExplodingGenerateSeries'>: 'generate_series'}

Maps function expressions to their default output column name(s).

For example, in Postgres, generate_series function outputs a column named "generate_series" by default, so we map the ExplodingGenerateSeries expression to "generate_series" string.

TIME_MAPPING: dict[str, str] = {'d': '%u', 'D': '%u', 'dd': '%d', 'DD': '%d', 'ddd': '%j', 'DDD': '%j', 'FMDD': '%-d', 'FMDDD': '%-j', 'FMHH12': '%-I', 'FMHH24': '%-H', 'FMMI': '%-M', 'FMMM': '%-m', 'FMSS': '%-S', 'HH12': '%I', 'HH24': '%H', 'mi': '%M', 'MI': '%M', 'mm': '%m', 'MM': '%m', 'OF': '%z', 'ss': '%S', 'SS': '%S', 'TMDay': '%A', 'TMDy': '%a', 'TMMon': '%b', 'TMMonth': '%B', 'TZ': '%Z', 'US': '%f', 'ww': '%U', 'WW': '%U', 'yy': '%y', 'YY': '%y', 'yyyy': '%Y', 'YYYY': '%Y'}

Associates this dialect's time formats with their equivalent Python strftime formats.

SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. \n) to its unescaped version (a literal newline character).

STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = False

Whether string literals support escape sequences (e.g. \n). Set by the metaclass based on the tokenizer's STRING_ESCAPES.

BYTE_STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = True

Whether byte string literals support escape sequences. Set by the metaclass based on the tokenizer's BYTE_STRING_ESCAPES.

INITCAP_SUPPORTS_CUSTOM_DELIMITERS = False
tokenizer_class = <class 'Postgres.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.dialects.dialect.JSONPathTokenizer'>
parser_class = <class 'sqlglot.parsers.postgres.PostgresParser'>
generator_class = <class 'sqlglot.generators.postgres.PostgresGenerator'>
TIME_TRIE: dict = {'d': {0: True, 'd': {0: True, 'd': {0: True}}}, 'D': {0: True, 'D': {0: True, 'D': {0: True}}}, 'F': {'M': {'D': {'D': {0: True, 'D': {0: True}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'S': {'S': {0: True}}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'm': {'i': {0: True}, 'm': {0: True}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'O': {'F': {0: True}}, 's': {'s': {0: True}}, 'S': {'S': {0: True}}, 'T': {'M': {'D': {'a': {'y': {0: True}}, 'y': {0: True}}, 'M': {'o': {'n': {0: True, 't': {'h': {0: True}}}}}}, 'Z': {0: True}}, 'U': {'S': {0: True}}, 'w': {'w': {0: True}}, 'W': {'W': {0: True}}, 'y': {'y': {0: True, 'y': {'y': {0: True}}}}, 'Y': {'Y': {0: True, 'Y': {'Y': {0: True}}}}}
FORMAT_TRIE: dict = {'d': {0: True, 'd': {0: True, 'd': {0: True}}}, 'D': {0: True, 'D': {0: True, 'D': {0: True}}}, 'F': {'M': {'D': {'D': {0: True, 'D': {0: True}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'S': {'S': {0: True}}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'm': {'i': {0: True}, 'm': {0: True}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'O': {'F': {0: True}}, 's': {'s': {0: True}}, 'S': {'S': {0: True}}, 'T': {'M': {'D': {'a': {'y': {0: True}}, 'y': {0: True}}, 'M': {'o': {'n': {0: True, 't': {'h': {0: True}}}}}}, 'Z': {0: True}}, 'U': {'S': {0: True}}, 'w': {'w': {0: True}}, 'W': {'W': {0: True}}, 'y': {'y': {0: True, 'y': {'y': {0: True}}}}, 'Y': {'Y': {0: True, 'Y': {'Y': {0: True}}}}}
INVERSE_TIME_MAPPING: dict[str, str] = {'%u': 'D', '%d': 'DD', '%j': 'DDD', '%-d': 'FMDD', '%-j': 'FMDDD', '%-I': 'FMHH12', '%-H': 'FMHH24', '%-M': 'FMMI', '%-m': 'FMMM', '%-S': 'FMSS', '%I': 'HH12', '%H': 'HH24', '%M': 'MI', '%m': 'MM', '%z': 'OF', '%S': 'SS', '%A': 'TMDay', '%a': 'TMDy', '%b': 'TMMon', '%B': 'TMMonth', '%Z': 'TZ', '%f': 'US', '%U': 'WW', '%y': 'YY', '%Y': 'YYYY'}
INVERSE_TIME_TRIE: dict = {'%': {'u': {0: True}, 'd': {0: True}, 'j': {0: True}, '-': {'d': {0: True}, 'j': {0: True}, 'I': {0: True}, 'H': {0: True}, 'M': {0: True}, 'm': {0: True}, 'S': {0: True}}, 'I': {0: True}, 'H': {0: True}, 'M': {0: True}, 'm': {0: True}, 'z': {0: True}, 'S': {0: True}, 'A': {0: True}, 'a': {0: True}, 'b': {0: True}, 'B': {0: True}, 'Z': {0: True}, 'f': {0: True}, 'U': {0: True}, 'y': {0: True}, 'Y': {0: True}}}
INVERSE_FORMAT_MAPPING: dict[str, str] = {}
INVERSE_FORMAT_TRIE: dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
VALID_INTERVAL_UNITS: set[str] = {'QTRS', 'TZH', 'HR', 'DAYS', 'US', 'NSEC', 'MICROSECS', 'WK', 'NSECOND', 'MSECONDS', 'WOY', 'MINUTE', 'WEEKDAY', 'MILLISEC', 'MILLISECS', 'MONS', 'MONTHS', 'NSECONDS', 'USECS', 'Y', 'YEAR', 'DAYOFWEEK', 'SECONDS', 'QTR', 'YYYY', 'MILLISECONDS', 'DECADE', 'MM', 'MSECS', 'HOUR', 'DAYOFWEEK_ISO', 'MICROSEC', 'CENTS', 'MS', 'DAYOFWEEKISO', 'MICROSECONDS', 'MILLISECOND', 'USECONDS', 'SECS', 'EPOCH_NANOSECONDS', 'QUARTER', 'USEC', 'HH', 'YRS', 'WEEK', 'EPOCH_MICROSECOND', 'DECADES', 'WEEKOFYEAR', 'WY', 'NANOSECOND', 'DOW', 'SECOND', 'SEC', 'MIL', 'CENTURY', 'HRS', 'DAYOFYEAR', 'YY', 'NANOSEC', 'NANOSECS', 'M', 'WEEKOFYEARISO', 'YEARS', 'Q', 'MILLENNIUM', 'WEEKISO', 'DAY OF WEEK', 'WEEK_ISO', 'MILS', 'MINS', 'NS', 'W', 'EPOCH', 'D', 'DAYOFMONTH', 'DAY', 'MILLISECON', 'CENTURIES', 'S', 'QUARTERS', 'WEEKDAY_ISO', 'WEEKOFYEAR_ISO', 'DEC', 'DW_ISO', 'DY', 'C', 'MONTH', 'YR', 'DW', 'YYY', 'EPOCH_SECOND', 'MIN', 'DD', 'DOW_ISO', 'MI', 'EPOCH_MICROSECONDS', 'MSEC', 'DAY OF YEAR', 'TZM', 'MINUTES', 'MON', 'MSECOND', 'DOY', 'EPOCH_MILLISECONDS', 'MILLENIA', 'EPOCH_NANOSECOND', 'EPOCH_MILLISECOND', 'MICROSECOND', 'EPOCH_SECONDS', 'TIMEZONE_HOUR', 'H', 'HOURS', 'TIMEZONE_MINUTE', 'CENT', 'DECS', 'USECOND'}
BIT_START: str | None = "b'"
BIT_END: str | None = "'"
HEX_START: str | None = "x'"
HEX_END: str | None = "'"
BYTE_START: str | None = "e'"
BYTE_END: str | None = "'"
UNICODE_START: str | None = None
UNICODE_END: str | None = None
class Postgres.Tokenizer(sqlglot.tokens.Tokenizer):
 61    class Tokenizer(tokens.Tokenizer):
 62        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
 63        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
 64        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
 65        BYTE_STRING_ESCAPES = ["'", "\\"]
 66        HEREDOC_STRINGS = ["$"]
 67
 68        HEREDOC_TAG_IS_IDENTIFIER = True
 69        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
 70
 71        KEYWORDS = {
 72            **tokens.Tokenizer.KEYWORDS,
 73            "~": TokenType.RLIKE,
 74            "@@": TokenType.DAT,
 75            "@>": TokenType.AT_GT,
 76            "<@": TokenType.LT_AT,
 77            "?&": TokenType.QMARK_AMP,
 78            "?|": TokenType.QMARK_PIPE,
 79            "#-": TokenType.HASH_DASH,
 80            "|/": TokenType.PIPE_SLASH,
 81            "||/": TokenType.DPIPE_SLASH,
 82            "BEGIN": TokenType.BEGIN,
 83            "BIGSERIAL": TokenType.BIGSERIAL,
 84            "CSTRING": TokenType.PSEUDO_TYPE,
 85            "DECLARE": TokenType.COMMAND,
 86            "DO": TokenType.COMMAND,
 87            "EXEC": TokenType.COMMAND,
 88            "HSTORE": TokenType.HSTORE,
 89            "INT8": TokenType.BIGINT,
 90            "MONEY": TokenType.MONEY,
 91            "NAME": TokenType.NAME,
 92            "OID": TokenType.OBJECT_IDENTIFIER,
 93            "ONLY": TokenType.ONLY,
 94            "POINT": TokenType.POINT,
 95            "REFRESH": TokenType.COMMAND,
 96            "REINDEX": TokenType.COMMAND,
 97            "RESET": TokenType.COMMAND,
 98            "SERIAL": TokenType.SERIAL,
 99            "SMALLSERIAL": TokenType.SMALLSERIAL,
100            "TEMP": TokenType.TEMPORARY,
101            "REGCLASS": TokenType.OBJECT_IDENTIFIER,
102            "REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
103            "REGCONFIG": TokenType.OBJECT_IDENTIFIER,
104            "REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
105            "REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
106            "REGOPER": TokenType.OBJECT_IDENTIFIER,
107            "REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
108            "REGPROC": TokenType.OBJECT_IDENTIFIER,
109            "REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
110            "REGROLE": TokenType.OBJECT_IDENTIFIER,
111            "REGTYPE": TokenType.OBJECT_IDENTIFIER,
112            "FLOAT": TokenType.DOUBLE,
113            "XML": TokenType.XML,
114            "VARIADIC": TokenType.VARIADIC,
115            "INOUT": TokenType.INOUT,
116        }
117        KEYWORDS.pop("/*+")
118        KEYWORDS.pop("DIV")
119
120        SINGLE_TOKENS = {
121            **tokens.Tokenizer.SINGLE_TOKENS,
122            "$": TokenType.HEREDOC_STRING,
123        }
124
125        VAR_SINGLE_TOKENS = {"$"}
BIT_STRINGS = [("b'", "'"), ("B'", "'")]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
BYTE_STRING_ESCAPES = ["'", '\\']
HEREDOC_STRINGS = ['$']
HEREDOC_TAG_IS_IDENTIFIER = True
HEREDOC_STRING_ALTERNATIVE = <TokenType.PARAMETER: 56>
KEYWORDS = {'{%': <TokenType.BLOCK_START: 71>, '{%+': <TokenType.BLOCK_START: 71>, '{%-': <TokenType.BLOCK_START: 71>, '%}': <TokenType.BLOCK_END: 72>, '+%}': <TokenType.BLOCK_END: 72>, '-%}': <TokenType.BLOCK_END: 72>, '{{+': <TokenType.BLOCK_START: 71>, '{{-': <TokenType.BLOCK_START: 71>, '+}}': <TokenType.BLOCK_END: 72>, '-}}': <TokenType.BLOCK_END: 72>, '&<': <TokenType.AMP_LT: 61>, '&>': <TokenType.AMP_GT: 62>, '==': <TokenType.EQ: 28>, '::': <TokenType.DCOLON: 14>, '?::': <TokenType.QDCOLON: 366>, '||': <TokenType.DPIPE: 37>, '|>': <TokenType.PIPE_GT: 38>, '>=': <TokenType.GTE: 26>, '<=': <TokenType.LTE: 24>, '<>': <TokenType.NEQ: 29>, '!=': <TokenType.NEQ: 29>, ':=': <TokenType.COLON_EQ: 31>, '<=>': <TokenType.NULLSAFE_EQ: 30>, '->': <TokenType.ARROW: 45>, '->>': <TokenType.DARROW: 46>, '=>': <TokenType.FARROW: 47>, '#>': <TokenType.HASH_ARROW: 49>, '#>>': <TokenType.DHASH_ARROW: 50>, '<->': <TokenType.LR_ARROW: 51>, '&&': <TokenType.DAMP: 60>, '??': <TokenType.DQMARK: 18>, '~~~': <TokenType.GLOB: 284>, '~~': <TokenType.LIKE: 315>, '~~*': <TokenType.ILIKE: 292>, '~*': <TokenType.IRLIKE: 304>, '-|-': <TokenType.ADJACENT: 63>, 'ALL': <TokenType.ALL: 218>, 'AND': <TokenType.AND: 34>, 'ANTI': <TokenType.ANTI: 219>, 'ANY': <TokenType.ANY: 220>, 'ASC': <TokenType.ASC: 223>, 'AS': <TokenType.ALIAS: 216>, 'ASOF': <TokenType.ASOF: 224>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 226>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 226>, 'BEGIN': <TokenType.BEGIN: 227>, 'BETWEEN': <TokenType.BETWEEN: 228>, 'CACHE': <TokenType.CACHE: 230>, 'UNCACHE': <TokenType.UNCACHE: 409>, 'CASE': <TokenType.CASE: 231>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 232>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 233>, 'COLLATE': <TokenType.COLLATE: 234>, 'COLUMN': <TokenType.COLUMN: 79>, 'COMMIT': <TokenType.COMMIT: 237>, 'CONNECT BY': <TokenType.CONNECT_BY: 238>, 'CONSTRAINT': <TokenType.CONSTRAINT: 239>, 'COPY': <TokenType.COPY: 240>, 'CREATE': <TokenType.CREATE: 241>, 'CROSS': 
<TokenType.CROSS: 242>, 'CUBE': <TokenType.CUBE: 243>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 244>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 246>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 247>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 248>, 'CURRENT_USER': <TokenType.CURRENT_USER: 249>, 'CURRENT_CATALOG': <TokenType.CURRENT_CATALOG: 251>, 'DATABASE': <TokenType.DATABASE: 78>, 'DEFAULT': <TokenType.DEFAULT: 253>, 'DELETE': <TokenType.DELETE: 254>, 'DESC': <TokenType.DESC: 255>, 'DESCRIBE': <TokenType.DESCRIBE: 256>, 'DISTINCT': <TokenType.DISTINCT: 259>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 260>, 'DROP': <TokenType.DROP: 262>, 'ELSE': <TokenType.ELSE: 263>, 'END': <TokenType.END: 264>, 'ENUM': <TokenType.ENUM: 203>, 'ESCAPE': <TokenType.ESCAPE: 265>, 'EXCEPT': <TokenType.EXCEPT: 266>, 'EXECUTE': <TokenType.EXECUTE: 267>, 'EXISTS': <TokenType.EXISTS: 268>, 'FALSE': <TokenType.FALSE: 269>, 'FETCH': <TokenType.FETCH: 270>, 'FILTER': <TokenType.FILTER: 273>, 'FILE': <TokenType.FILE: 271>, 'FIRST': <TokenType.FIRST: 275>, 'FULL': <TokenType.FULL: 281>, 'FUNCTION': <TokenType.FUNCTION: 282>, 'FOR': <TokenType.FOR: 276>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 278>, 'FORMAT': <TokenType.FORMAT: 279>, 'FROM': <TokenType.FROM: 280>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 170>, 'GEOMETRY': <TokenType.GEOMETRY: 173>, 'GLOB': <TokenType.GLOB: 284>, 'GROUP BY': <TokenType.GROUP_BY: 287>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 288>, 'HAVING': <TokenType.HAVING: 289>, 'ILIKE': <TokenType.ILIKE: 292>, 'IN': <TokenType.IN: 293>, 'INDEX': <TokenType.INDEX: 294>, 'INET': <TokenType.INET: 198>, 'INNER': <TokenType.INNER: 296>, 'INSERT': <TokenType.INSERT: 297>, 'INTERVAL': <TokenType.INTERVAL: 301>, 'INTERSECT': <TokenType.INTERSECT: 300>, 'INTO': <TokenType.INTO: 302>, 'IS': <TokenType.IS: 305>, 'ISNULL': <TokenType.ISNULL: 306>, 'JOIN': <TokenType.JOIN: 307>, 'KEEP': <TokenType.KEEP: 309>, 'KILL': <TokenType.KILL: 311>, 'LATERAL': <TokenType.LATERAL: 313>, 
'LEFT': <TokenType.LEFT: 314>, 'LIKE': <TokenType.LIKE: 315>, 'LIMIT': <TokenType.LIMIT: 316>, 'LOAD': <TokenType.LOAD: 318>, 'LOCALTIME': <TokenType.LOCALTIME: 177>, 'LOCALTIMESTAMP': <TokenType.LOCALTIMESTAMP: 178>, 'LOCK': <TokenType.LOCK: 319>, 'MERGE': <TokenType.MERGE: 325>, 'NAMESPACE': <TokenType.NAMESPACE: 436>, 'NATURAL': <TokenType.NATURAL: 328>, 'NEXT': <TokenType.NEXT: 329>, 'NOT': <TokenType.NOT: 27>, 'NOTNULL': <TokenType.NOTNULL: 331>, 'NULL': <TokenType.NULL: 332>, 'OBJECT': <TokenType.OBJECT: 197>, 'OFFSET': <TokenType.OFFSET: 334>, 'ON': <TokenType.ON: 335>, 'OR': <TokenType.OR: 35>, 'XOR': <TokenType.XOR: 64>, 'ORDER BY': <TokenType.ORDER_BY: 338>, 'ORDINALITY': <TokenType.ORDINALITY: 341>, 'OUT': <TokenType.OUT: 342>, 'OUTER': <TokenType.OUTER: 344>, 'OVER': <TokenType.OVER: 345>, 'OVERLAPS': <TokenType.OVERLAPS: 346>, 'OVERWRITE': <TokenType.OVERWRITE: 347>, 'PARTITION': <TokenType.PARTITION: 349>, 'PARTITION BY': <TokenType.PARTITION_BY: 350>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 350>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 350>, 'PERCENT': <TokenType.PERCENT: 351>, 'PIVOT': <TokenType.PIVOT: 352>, 'PRAGMA': <TokenType.PRAGMA: 357>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 359>, 'PROCEDURE': <TokenType.PROCEDURE: 360>, 'OPERATOR': <TokenType.OPERATOR: 337>, 'QUALIFY': <TokenType.QUALIFY: 364>, 'RANGE': <TokenType.RANGE: 367>, 'RECURSIVE': <TokenType.RECURSIVE: 368>, 'REGEXP': <TokenType.RLIKE: 376>, 'RENAME': <TokenType.RENAME: 370>, 'REPLACE': <TokenType.REPLACE: 371>, 'RETURNING': <TokenType.RETURNING: 372>, 'REFERENCES': <TokenType.REFERENCES: 374>, 'RIGHT': <TokenType.RIGHT: 375>, 'RLIKE': <TokenType.RLIKE: 376>, 'ROLLBACK': <TokenType.ROLLBACK: 378>, 'ROLLUP': <TokenType.ROLLUP: 379>, 'ROW': <TokenType.ROW: 380>, 'ROWS': <TokenType.ROWS: 381>, 'SCHEMA': <TokenType.SCHEMA: 81>, 'SELECT': <TokenType.SELECT: 383>, 'SEMI': <TokenType.SEMI: 384>, 'SESSION': <TokenType.SESSION: 57>, 'SESSION_USER': <TokenType.SESSION_USER: 59>, 'SET': 
<TokenType.SET: 388>, 'SETTINGS': <TokenType.SETTINGS: 389>, 'SHOW': <TokenType.SHOW: 390>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 391>, 'SOME': <TokenType.SOME: 392>, 'SORT BY': <TokenType.SORT_BY: 393>, 'SQL SECURITY': <TokenType.SQL_SECURITY: 395>, 'START WITH': <TokenType.START_WITH: 396>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 398>, 'TABLE': <TokenType.TABLE: 82>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 401>, 'TEMP': <TokenType.TEMPORARY: 403>, 'TEMPORARY': <TokenType.TEMPORARY: 403>, 'THEN': <TokenType.THEN: 405>, 'TRUE': <TokenType.TRUE: 406>, 'TRUNCATE': <TokenType.TRUNCATE: 407>, 'TRIGGER': <TokenType.TRIGGER: 408>, 'UNION': <TokenType.UNION: 410>, 'UNKNOWN': <TokenType.UNKNOWN: 212>, 'UNNEST': <TokenType.UNNEST: 411>, 'UNPIVOT': <TokenType.UNPIVOT: 412>, 'UPDATE': <TokenType.UPDATE: 413>, 'USE': <TokenType.USE: 414>, 'USING': <TokenType.USING: 415>, 'UUID': <TokenType.UUID: 169>, 'VALUES': <TokenType.VALUES: 416>, 'VIEW': <TokenType.VIEW: 418>, 'VOLATILE': <TokenType.VOLATILE: 420>, 'WHEN': <TokenType.WHEN: 422>, 'WHERE': <TokenType.WHERE: 423>, 'WINDOW': <TokenType.WINDOW: 424>, 'WITH': <TokenType.WITH: 425>, 'APPLY': <TokenType.APPLY: 221>, 'ARRAY': <TokenType.ARRAY: 222>, 'BIT': <TokenType.BIT: 95>, 'BOOL': <TokenType.BOOLEAN: 96>, 'BOOLEAN': <TokenType.BOOLEAN: 96>, 'BYTE': <TokenType.TINYINT: 97>, 'MEDIUMINT': <TokenType.MEDIUMINT: 101>, 'INT1': <TokenType.TINYINT: 97>, 'TINYINT': <TokenType.TINYINT: 97>, 'INT16': <TokenType.SMALLINT: 99>, 'SHORT': <TokenType.SMALLINT: 99>, 'SMALLINT': <TokenType.SMALLINT: 99>, 'HUGEINT': <TokenType.INT128: 108>, 'UHUGEINT': <TokenType.UINT128: 109>, 'INT2': <TokenType.SMALLINT: 99>, 'INTEGER': <TokenType.INT: 103>, 'INT': <TokenType.INT: 103>, 'INT4': <TokenType.INT: 103>, 'INT32': <TokenType.INT: 103>, 'INT64': <TokenType.BIGINT: 105>, 'INT128': <TokenType.INT128: 108>, 'INT256': <TokenType.INT256: 110>, 'LONG': <TokenType.BIGINT: 105>, 'BIGINT': <TokenType.BIGINT: 105>, 'INT8': <TokenType.BIGINT: 105>, 'UINT': 
<TokenType.UINT: 104>, 'UINT128': <TokenType.UINT128: 109>, 'UINT256': <TokenType.UINT256: 111>, 'DEC': <TokenType.DECIMAL: 115>, 'DECIMAL': <TokenType.DECIMAL: 115>, 'DECIMAL32': <TokenType.DECIMAL32: 116>, 'DECIMAL64': <TokenType.DECIMAL64: 117>, 'DECIMAL128': <TokenType.DECIMAL128: 118>, 'DECIMAL256': <TokenType.DECIMAL256: 119>, 'DECFLOAT': <TokenType.DECFLOAT: 120>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 122>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 122>, 'BIGNUM': <TokenType.BIGNUM: 107>, 'LIST': <TokenType.LIST: 317>, 'MAP': <TokenType.MAP: 320>, 'NULLABLE': <TokenType.NULLABLE: 172>, 'NUMBER': <TokenType.DECIMAL: 115>, 'NUMERIC': <TokenType.DECIMAL: 115>, 'FIXED': <TokenType.DECIMAL: 115>, 'REAL': <TokenType.FLOAT: 112>, 'FLOAT': <TokenType.DOUBLE: 113>, 'FLOAT4': <TokenType.FLOAT: 112>, 'FLOAT8': <TokenType.DOUBLE: 113>, 'DOUBLE': <TokenType.DOUBLE: 113>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 113>, 'JSON': <TokenType.JSON: 139>, 'JSONB': <TokenType.JSONB: 140>, 'CHAR': <TokenType.CHAR: 123>, 'CHARACTER': <TokenType.CHAR: 123>, 'CHAR VARYING': <TokenType.VARCHAR: 125>, 'CHARACTER VARYING': <TokenType.VARCHAR: 125>, 'NCHAR': <TokenType.NCHAR: 124>, 'VARCHAR': <TokenType.VARCHAR: 125>, 'VARCHAR2': <TokenType.VARCHAR: 125>, 'NVARCHAR': <TokenType.NVARCHAR: 126>, 'NVARCHAR2': <TokenType.NVARCHAR: 126>, 'BPCHAR': <TokenType.BPCHAR: 127>, 'STR': <TokenType.TEXT: 128>, 'STRING': <TokenType.TEXT: 128>, 'TEXT': <TokenType.TEXT: 128>, 'LONGTEXT': <TokenType.LONGTEXT: 130>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 129>, 'TINYTEXT': <TokenType.TINYTEXT: 135>, 'CLOB': <TokenType.TEXT: 128>, 'LONGVARCHAR': <TokenType.TEXT: 128>, 'BINARY': <TokenType.BINARY: 137>, 'BLOB': <TokenType.VARBINARY: 138>, 'LONGBLOB': <TokenType.LONGBLOB: 133>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 132>, 'TINYBLOB': <TokenType.TINYBLOB: 134>, 'BYTEA': <TokenType.VARBINARY: 138>, 'VARBINARY': <TokenType.VARBINARY: 138>, 'TIME': <TokenType.TIME: 141>, 'TIMETZ': <TokenType.TIMETZ: 142>, 'TIME_NS': 
<TokenType.TIME_NS: 143>, 'TIMESTAMP': <TokenType.TIMESTAMP: 144>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 145>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 146>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 146>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 147>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 147>, 'DATE': <TokenType.DATE: 155>, 'DATETIME': <TokenType.DATETIME: 151>, 'INT4RANGE': <TokenType.INT4RANGE: 157>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 158>, 'INT8RANGE': <TokenType.INT8RANGE: 159>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 160>, 'NUMRANGE': <TokenType.NUMRANGE: 161>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 162>, 'TSRANGE': <TokenType.TSRANGE: 163>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 164>, 'TSTZRANGE': <TokenType.TSTZRANGE: 165>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 166>, 'DATERANGE': <TokenType.DATERANGE: 167>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 168>, 'UNIQUE': <TokenType.UNIQUE: 426>, 'VECTOR': <TokenType.VECTOR: 213>, 'STRUCT': <TokenType.STRUCT: 399>, 'SEQUENCE': <TokenType.SEQUENCE: 386>, 'VARIANT': <TokenType.VARIANT: 196>, 'ALTER': <TokenType.ALTER: 217>, 'ANALYZE': <TokenType.ANALYZE: 435>, 'CALL': <TokenType.COMMAND: 235>, 'COMMENT': <TokenType.COMMENT: 236>, 'EXPLAIN': <TokenType.COMMAND: 235>, 'GRANT': <TokenType.GRANT: 286>, 'REVOKE': <TokenType.REVOKE: 373>, 'OPTIMIZE': <TokenType.COMMAND: 235>, 'PREPARE': <TokenType.COMMAND: 235>, 'VACUUM': <TokenType.COMMAND: 235>, 'USER-DEFINED': <TokenType.USERDEFINED: 191>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 430>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 431>, '~': <TokenType.RLIKE: 376>, '@@': <TokenType.DAT: 52>, '@>': <TokenType.AT_GT: 54>, '<@': <TokenType.LT_AT: 53>, '?&': <TokenType.QMARK_AMP: 66>, '?|': <TokenType.QMARK_PIPE: 67>, '#-': <TokenType.HASH_DASH: 68>, '|/': <TokenType.PIPE_SLASH: 40>, '||/': <TokenType.DPIPE_SLASH: 41>, 'BIGSERIAL': <TokenType.BIGSERIAL: 188>, 'CSTRING': <TokenType.PSEUDO_TYPE: 362>, 'DECLARE': <TokenType.COMMAND: 
235>, 'DO': <TokenType.COMMAND: 235>, 'EXEC': <TokenType.COMMAND: 235>, 'HSTORE': <TokenType.HSTORE: 184>, 'MONEY': <TokenType.MONEY: 192>, 'NAME': <TokenType.NAME: 136>, 'OID': <TokenType.OBJECT_IDENTIFIER: 333>, 'ONLY': <TokenType.ONLY: 336>, 'POINT': <TokenType.POINT: 174>, 'REFRESH': <TokenType.COMMAND: 235>, 'REINDEX': <TokenType.COMMAND: 235>, 'RESET': <TokenType.COMMAND: 235>, 'SERIAL': <TokenType.SERIAL: 186>, 'SMALLSERIAL': <TokenType.SMALLSERIAL: 187>, 'REGCLASS': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGCOLLATION': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGCONFIG': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGDICTIONARY': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGNAMESPACE': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGOPER': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGOPERATOR': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGPROC': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGPROCEDURE': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGROLE': <TokenType.OBJECT_IDENTIFIER: 333>, 'REGTYPE': <TokenType.OBJECT_IDENTIFIER: 333>, 'XML': <TokenType.XML: 189>, 'VARIADIC': <TokenType.VARIADIC: 417>, 'INOUT': <TokenType.INOUT: 343>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 1>, ')': <TokenType.R_PAREN: 2>, '[': <TokenType.L_BRACKET: 3>, ']': <TokenType.R_BRACKET: 4>, '{': <TokenType.L_BRACE: 5>, '}': <TokenType.R_BRACE: 6>, '&': <TokenType.AMP: 36>, '^': <TokenType.CARET: 42>, ':': <TokenType.COLON: 11>, ',': <TokenType.COMMA: 7>, '.': <TokenType.DOT: 8>, '-': <TokenType.DASH: 9>, '=': <TokenType.EQ: 28>, '>': <TokenType.GT: 25>, '<': <TokenType.LT: 23>, '%': <TokenType.MOD: 326>, '!': <TokenType.NOT: 27>, '|': <TokenType.PIPE: 39>, '+': <TokenType.PLUS: 10>, ';': <TokenType.SEMICOLON: 19>, '/': <TokenType.SLASH: 22>, '\\': <TokenType.BACKSLASH: 21>, '*': <TokenType.STAR: 20>, '~': <TokenType.TILDE: 44>, '?': <TokenType.PLACEHOLDER: 353>, '@': <TokenType.PARAMETER: 56>, '#': <TokenType.HASH: 48>, "'": <TokenType.UNKNOWN: 212>, '`': <TokenType.UNKNOWN: 212>, '"': <TokenType.UNKNOWN: 212>, '$': <TokenType.HEREDOC_STRING: 93>}
VAR_SINGLE_TOKENS = {'$'}