Edit on GitHub

sqlglot.dialects.postgres

  1from __future__ import annotations
  2
  3from sqlglot import exp, tokens
  4from sqlglot.dialects.dialect import Dialect
  5from sqlglot.generators.postgres import PostgresGenerator
  6from sqlglot.parsers.postgres import PostgresParser
  7from sqlglot.tokens import TokenType
  8
  9
 10class Postgres(Dialect):
 11    INDEX_OFFSET = 1
 12    TYPED_DIVISION = True
 13    CONCAT_COALESCE = True
 14    CONCAT_WS_COALESCE = True
 15    NULL_ORDERING = "nulls_are_large"
 16    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 17    TABLESAMPLE_SIZE_IS_PERCENT = True
 18    TABLES_REFERENCEABLE_AS_COLUMNS = True
 19
 20    DEFAULT_FUNCTIONS_COLUMN_NAMES = {
 21        exp.ExplodingGenerateSeries: "generate_series",
 22    }
 23
 24    TIME_MAPPING = {
 25        "d": "%u",  # 1-based day of week
 26        "D": "%u",  # 1-based day of week
 27        "dd": "%d",  # day of month
 28        "DD": "%d",  # day of month
 29        "ddd": "%j",  # zero padded day of year
 30        "DDD": "%j",  # zero padded day of year
 31        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
 32        "FMDDD": "%-j",  # day of year
 33        "FMHH12": "%-I",  # 9
 34        "FMHH24": "%-H",  # 9
 35        "FMMI": "%-M",  # Minute
 36        "FMMM": "%-m",  # 1
 37        "FMSS": "%-S",  # Second
 38        "HH12": "%I",  # 09
 39        "HH24": "%H",  # 09
 40        "mi": "%M",  # zero padded minute
 41        "MI": "%M",  # zero padded minute
 42        "mm": "%m",  # 01
 43        "MM": "%m",  # 01
 44        "OF": "%z",  # utc offset
 45        "ss": "%S",  # zero padded second
 46        "SS": "%S",  # zero padded second
 47        "TMDay": "%A",  # TM is locale dependent
 48        "TMDy": "%a",
 49        "TMMon": "%b",  # Sep
 50        "TMMonth": "%B",  # September
 51        "TZ": "%Z",  # uppercase timezone name
 52        "US": "%f",  # zero padded microsecond
 53        "ww": "%U",  # 1-based week of year
 54        "WW": "%U",  # 1-based week of year
 55        "yy": "%y",  # 15
 56        "YY": "%y",  # 15
 57        "yyyy": "%Y",  # 2015
 58        "YYYY": "%Y",  # 2015
 59    }
 60
 61    class Tokenizer(tokens.Tokenizer):
 62        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
 63        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
 64        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
 65        BYTE_STRING_ESCAPES = ["'", "\\"]
 66        HEREDOC_STRINGS = ["$"]
 67
 68        HEREDOC_TAG_IS_IDENTIFIER = True
 69        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
 70
 71        KEYWORDS = {
 72            **tokens.Tokenizer.KEYWORDS,
 73            "~": TokenType.RLIKE,
 74            "@@": TokenType.DAT,
 75            "@>": TokenType.AT_GT,
 76            "<@": TokenType.LT_AT,
 77            "?&": TokenType.QMARK_AMP,
 78            "?|": TokenType.QMARK_PIPE,
 79            "#-": TokenType.HASH_DASH,
 80            "|/": TokenType.PIPE_SLASH,
 81            "||/": TokenType.DPIPE_SLASH,
 82            "BEGIN": TokenType.BEGIN,
 83            "BIGSERIAL": TokenType.BIGSERIAL,
 84            "CSTRING": TokenType.PSEUDO_TYPE,
 85            "DECLARE": TokenType.COMMAND,
 86            "DO": TokenType.COMMAND,
 87            "EXEC": TokenType.COMMAND,
 88            "HSTORE": TokenType.HSTORE,
 89            "INT8": TokenType.BIGINT,
 90            "MONEY": TokenType.MONEY,
 91            "NAME": TokenType.NAME,
 92            "OID": TokenType.OBJECT_IDENTIFIER,
 93            "ONLY": TokenType.ONLY,
 94            "POINT": TokenType.POINT,
 95            "REFRESH": TokenType.COMMAND,
 96            "REINDEX": TokenType.COMMAND,
 97            "RESET": TokenType.COMMAND,
 98            "SERIAL": TokenType.SERIAL,
 99            "SMALLSERIAL": TokenType.SMALLSERIAL,
100            "TEMP": TokenType.TEMPORARY,
101            "TYPE": TokenType.TYPE,
102            "REGCLASS": TokenType.OBJECT_IDENTIFIER,
103            "REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
104            "REGCONFIG": TokenType.OBJECT_IDENTIFIER,
105            "REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
106            "REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
107            "REGOPER": TokenType.OBJECT_IDENTIFIER,
108            "REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
109            "REGPROC": TokenType.OBJECT_IDENTIFIER,
110            "REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
111            "REGROLE": TokenType.OBJECT_IDENTIFIER,
112            "REGTYPE": TokenType.OBJECT_IDENTIFIER,
113            "FLOAT": TokenType.DOUBLE,
114            "XML": TokenType.XML,
115            "VARIADIC": TokenType.VARIADIC,
116            "INOUT": TokenType.INOUT,
117        }
118        KEYWORDS.pop("/*+")
119        KEYWORDS.pop("DIV")
120
121        SINGLE_TOKENS = {
122            **tokens.Tokenizer.SINGLE_TOKENS,
123            "$": TokenType.HEREDOC_STRING,
124        }
125
126        VAR_SINGLE_TOKENS = {"$"}
127
128    Parser = PostgresParser
129
130    Generator = PostgresGenerator
class Postgres(sqlglot.dialects.dialect.Dialect):
 11class Postgres(Dialect):
 12    INDEX_OFFSET = 1
 13    TYPED_DIVISION = True
 14    CONCAT_COALESCE = True
 15    CONCAT_WS_COALESCE = True
 16    NULL_ORDERING = "nulls_are_large"
 17    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 18    TABLESAMPLE_SIZE_IS_PERCENT = True
 19    TABLES_REFERENCEABLE_AS_COLUMNS = True
 20
 21    DEFAULT_FUNCTIONS_COLUMN_NAMES = {
 22        exp.ExplodingGenerateSeries: "generate_series",
 23    }
 24
 25    TIME_MAPPING = {
 26        "d": "%u",  # 1-based day of week
 27        "D": "%u",  # 1-based day of week
 28        "dd": "%d",  # day of month
 29        "DD": "%d",  # day of month
 30        "ddd": "%j",  # zero padded day of year
 31        "DDD": "%j",  # zero padded day of year
 32        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
 33        "FMDDD": "%-j",  # day of year
 34        "FMHH12": "%-I",  # 9
 35        "FMHH24": "%-H",  # 9
 36        "FMMI": "%-M",  # Minute
 37        "FMMM": "%-m",  # 1
 38        "FMSS": "%-S",  # Second
 39        "HH12": "%I",  # 09
 40        "HH24": "%H",  # 09
 41        "mi": "%M",  # zero padded minute
 42        "MI": "%M",  # zero padded minute
 43        "mm": "%m",  # 01
 44        "MM": "%m",  # 01
 45        "OF": "%z",  # utc offset
 46        "ss": "%S",  # zero padded second
 47        "SS": "%S",  # zero padded second
 48        "TMDay": "%A",  # TM is locale dependent
 49        "TMDy": "%a",
 50        "TMMon": "%b",  # Sep
 51        "TMMonth": "%B",  # September
 52        "TZ": "%Z",  # uppercase timezone name
 53        "US": "%f",  # zero padded microsecond
 54        "ww": "%U",  # 1-based week of year
 55        "WW": "%U",  # 1-based week of year
 56        "yy": "%y",  # 15
 57        "YY": "%y",  # 15
 58        "yyyy": "%Y",  # 2015
 59        "YYYY": "%Y",  # 2015
 60    }
 61
 62    class Tokenizer(tokens.Tokenizer):
 63        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
 64        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
 65        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
 66        BYTE_STRING_ESCAPES = ["'", "\\"]
 67        HEREDOC_STRINGS = ["$"]
 68
 69        HEREDOC_TAG_IS_IDENTIFIER = True
 70        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
 71
 72        KEYWORDS = {
 73            **tokens.Tokenizer.KEYWORDS,
 74            "~": TokenType.RLIKE,
 75            "@@": TokenType.DAT,
 76            "@>": TokenType.AT_GT,
 77            "<@": TokenType.LT_AT,
 78            "?&": TokenType.QMARK_AMP,
 79            "?|": TokenType.QMARK_PIPE,
 80            "#-": TokenType.HASH_DASH,
 81            "|/": TokenType.PIPE_SLASH,
 82            "||/": TokenType.DPIPE_SLASH,
 83            "BEGIN": TokenType.BEGIN,
 84            "BIGSERIAL": TokenType.BIGSERIAL,
 85            "CSTRING": TokenType.PSEUDO_TYPE,
 86            "DECLARE": TokenType.COMMAND,
 87            "DO": TokenType.COMMAND,
 88            "EXEC": TokenType.COMMAND,
 89            "HSTORE": TokenType.HSTORE,
 90            "INT8": TokenType.BIGINT,
 91            "MONEY": TokenType.MONEY,
 92            "NAME": TokenType.NAME,
 93            "OID": TokenType.OBJECT_IDENTIFIER,
 94            "ONLY": TokenType.ONLY,
 95            "POINT": TokenType.POINT,
 96            "REFRESH": TokenType.COMMAND,
 97            "REINDEX": TokenType.COMMAND,
 98            "RESET": TokenType.COMMAND,
 99            "SERIAL": TokenType.SERIAL,
100            "SMALLSERIAL": TokenType.SMALLSERIAL,
101            "TEMP": TokenType.TEMPORARY,
102            "TYPE": TokenType.TYPE,
103            "REGCLASS": TokenType.OBJECT_IDENTIFIER,
104            "REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
105            "REGCONFIG": TokenType.OBJECT_IDENTIFIER,
106            "REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
107            "REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
108            "REGOPER": TokenType.OBJECT_IDENTIFIER,
109            "REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
110            "REGPROC": TokenType.OBJECT_IDENTIFIER,
111            "REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
112            "REGROLE": TokenType.OBJECT_IDENTIFIER,
113            "REGTYPE": TokenType.OBJECT_IDENTIFIER,
114            "FLOAT": TokenType.DOUBLE,
115            "XML": TokenType.XML,
116            "VARIADIC": TokenType.VARIADIC,
117            "INOUT": TokenType.INOUT,
118        }
119        KEYWORDS.pop("/*+")
120        KEYWORDS.pop("DIV")
121
122        SINGLE_TOKENS = {
123            **tokens.Tokenizer.SINGLE_TOKENS,
124            "$": TokenType.HEREDOC_STRING,
125        }
126
127        VAR_SINGLE_TOKENS = {"$"}
128
129    Parser = PostgresParser
130
131    Generator = PostgresGenerator
INDEX_OFFSET = 1

The base index offset for arrays.

TYPED_DIVISION = True

Whether the behavior of a / b depends on the types of a and b. False means a / b is always float division. True means a / b is integer division if both a and b are integers.

CONCAT_COALESCE = True

A NULL arg in CONCAT yields NULL by default, but in some dialects it yields an empty string.

CONCAT_WS_COALESCE = True

A NULL arg in CONCAT_WS yields NULL by default, but in some dialects it is skipped.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents a percentage.

TABLES_REFERENCEABLE_AS_COLUMNS = True

Whether table names can be referenced as columns (treated as structs).

BigQuery allows tables to be referenced as columns in queries, automatically treating them as struct values containing all the table's columns.

For example, in BigQuery: SELECT t FROM my_table AS t -- Returns entire row as a struct

DEFAULT_FUNCTIONS_COLUMN_NAMES: dict[type[sqlglot.expressions.core.Func], str | tuple[str, ...]] = {<class 'sqlglot.expressions.array.ExplodingGenerateSeries'>: 'generate_series'}

Maps function expressions to their default output column name(s).

For example, in Postgres, generate_series function outputs a column named "generate_series" by default, so we map the ExplodingGenerateSeries expression to "generate_series" string.

TIME_MAPPING: dict[str, str] = {'d': '%u', 'D': '%u', 'dd': '%d', 'DD': '%d', 'ddd': '%j', 'DDD': '%j', 'FMDD': '%-d', 'FMDDD': '%-j', 'FMHH12': '%-I', 'FMHH24': '%-H', 'FMMI': '%-M', 'FMMM': '%-m', 'FMSS': '%-S', 'HH12': '%I', 'HH24': '%H', 'mi': '%M', 'MI': '%M', 'mm': '%m', 'MM': '%m', 'OF': '%z', 'ss': '%S', 'SS': '%S', 'TMDay': '%A', 'TMDy': '%a', 'TMMon': '%b', 'TMMonth': '%B', 'TZ': '%Z', 'US': '%f', 'ww': '%U', 'WW': '%U', 'yy': '%y', 'YY': '%y', 'yyyy': '%Y', 'YYYY': '%Y'}

Associates this dialect's time formats with their equivalent Python strftime formats.

SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\n`) to its unescaped version (an actual newline character).

STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = False

Whether string literals support escape sequences (e.g. \n). Set by the metaclass based on the tokenizer's STRING_ESCAPES.

BYTE_STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = True

Whether byte string literals support escape sequences. Set by the metaclass based on the tokenizer's BYTE_STRING_ESCAPES.

INITCAP_SUPPORTS_CUSTOM_DELIMITERS = False
tokenizer_class = <class 'Postgres.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.dialects.dialect.JSONPathTokenizer'>
parser_class = <class 'sqlglot.parsers.postgres.PostgresParser'>
generator_class = <class 'sqlglot.generators.postgres.PostgresGenerator'>
TIME_TRIE: dict = {'d': {0: True, 'd': {0: True, 'd': {0: True}}}, 'D': {0: True, 'D': {0: True, 'D': {0: True}}}, 'F': {'M': {'D': {'D': {0: True, 'D': {0: True}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'S': {'S': {0: True}}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'm': {'i': {0: True}, 'm': {0: True}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'O': {'F': {0: True}}, 's': {'s': {0: True}}, 'S': {'S': {0: True}}, 'T': {'M': {'D': {'a': {'y': {0: True}}, 'y': {0: True}}, 'M': {'o': {'n': {0: True, 't': {'h': {0: True}}}}}}, 'Z': {0: True}}, 'U': {'S': {0: True}}, 'w': {'w': {0: True}}, 'W': {'W': {0: True}}, 'y': {'y': {0: True, 'y': {'y': {0: True}}}}, 'Y': {'Y': {0: True, 'Y': {'Y': {0: True}}}}}
FORMAT_TRIE: dict = {'d': {0: True, 'd': {0: True, 'd': {0: True}}}, 'D': {0: True, 'D': {0: True, 'D': {0: True}}}, 'F': {'M': {'D': {'D': {0: True, 'D': {0: True}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'S': {'S': {0: True}}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'm': {'i': {0: True}, 'm': {0: True}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'O': {'F': {0: True}}, 's': {'s': {0: True}}, 'S': {'S': {0: True}}, 'T': {'M': {'D': {'a': {'y': {0: True}}, 'y': {0: True}}, 'M': {'o': {'n': {0: True, 't': {'h': {0: True}}}}}}, 'Z': {0: True}}, 'U': {'S': {0: True}}, 'w': {'w': {0: True}}, 'W': {'W': {0: True}}, 'y': {'y': {0: True, 'y': {'y': {0: True}}}}, 'Y': {'Y': {0: True, 'Y': {'Y': {0: True}}}}}
INVERSE_TIME_MAPPING: dict[str, str] = {'%u': 'D', '%d': 'DD', '%j': 'DDD', '%-d': 'FMDD', '%-j': 'FMDDD', '%-I': 'FMHH12', '%-H': 'FMHH24', '%-M': 'FMMI', '%-m': 'FMMM', '%-S': 'FMSS', '%I': 'HH12', '%H': 'HH24', '%M': 'MI', '%m': 'MM', '%z': 'OF', '%S': 'SS', '%A': 'TMDay', '%a': 'TMDy', '%b': 'TMMon', '%B': 'TMMonth', '%Z': 'TZ', '%f': 'US', '%U': 'WW', '%y': 'YY', '%Y': 'YYYY'}
INVERSE_TIME_TRIE: dict = {'%': {'u': {0: True}, 'd': {0: True}, 'j': {0: True}, '-': {'d': {0: True}, 'j': {0: True}, 'I': {0: True}, 'H': {0: True}, 'M': {0: True}, 'm': {0: True}, 'S': {0: True}}, 'I': {0: True}, 'H': {0: True}, 'M': {0: True}, 'm': {0: True}, 'z': {0: True}, 'S': {0: True}, 'A': {0: True}, 'a': {0: True}, 'b': {0: True}, 'B': {0: True}, 'Z': {0: True}, 'f': {0: True}, 'U': {0: True}, 'y': {0: True}, 'Y': {0: True}}}
INVERSE_FORMAT_MAPPING: dict[str, str] = {}
INVERSE_FORMAT_TRIE: dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
VALID_INTERVAL_UNITS: set[str] = {'DAYOFYEAR', 'H', 'MICROSEC', 'NSECONDS', 'S', 'MONTHS', 'DEC', 'EPOCH_SECOND', 'WEEKOFYEAR', 'CENTS', 'DW', 'QUARTERS', 'WEEKDAY_ISO', 'DECADE', 'NANOSEC', 'CENTURY', 'EPOCH', 'MSECS', 'M', 'DAYOFWEEK', 'WEEKDAY', 'CENTURIES', 'MILS', 'MI', 'DAYOFWEEKISO', 'MONTH', 'HOURS', 'DOW_ISO', 'WEEK_ISO', 'SEC', 'NSECOND', 'DAY', 'HH', 'MSEC', 'EPOCH_MICROSECONDS', 'CENT', 'DAYOFWEEK_ISO', 'YYY', 'YEAR', 'USECOND', 'C', 'EPOCH_SECONDS', 'YR', 'MIN', 'NSEC', 'WY', 'USECS', 'WEEKOFYEAR_ISO', 'DAY OF YEAR', 'YEARS', 'NANOSECOND', 'QTR', 'DOW', 'WEEKISO', 'EPOCH_MICROSECOND', 'DY', 'MILLISEC', 'MM', 'W', 'WEEK', 'SECONDS', 'NANOSECS', 'MILLISECON', 'SECOND', 'USECONDS', 'D', 'US', 'EPOCH_NANOSECONDS', 'Y', 'NS', 'EPOCH_NANOSECOND', 'YYYY', 'DW_ISO', 'DAYOFMONTH', 'QUARTER', 'MON', 'Q', 'MIL', 'MONS', 'WOY', 'WEEKOFYEARISO', 'MINUTE', 'YY', 'USEC', 'YRS', 'MILLISECONDS', 'MILLENNIUM', 'TIMEZONE_MINUTE', 'HR', 'HOUR', 'MICROSECONDS', 'EPOCH_MILLISECONDS', 'MILLISECOND', 'MILLISECS', 'MINS', 'MSECOND', 'WK', 'MSECONDS', 'DD', 'TIMEZONE_HOUR', 'TZM', 'MICROSECOND', 'MINUTES', 'DECS', 'QTRS', 'MS', 'TZH', 'MICROSECS', 'EPOCH_MILLISECOND', 'DECADES', 'HRS', 'DAY OF WEEK', 'SECS', 'DAYS', 'DOY', 'MILLENIA'}
BIT_START: str | None = "b'"
BIT_END: str | None = "'"
HEX_START: str | None = "x'"
HEX_END: str | None = "'"
BYTE_START: str | None = "e'"
BYTE_END: str | None = "'"
UNICODE_START: str | None = None
UNICODE_END: str | None = None
class Postgres.Tokenizer(sqlglot.tokens.Tokenizer):
 62    class Tokenizer(tokens.Tokenizer):
 63        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
 64        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
 65        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
 66        BYTE_STRING_ESCAPES = ["'", "\\"]
 67        HEREDOC_STRINGS = ["$"]
 68
 69        HEREDOC_TAG_IS_IDENTIFIER = True
 70        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
 71
 72        KEYWORDS = {
 73            **tokens.Tokenizer.KEYWORDS,
 74            "~": TokenType.RLIKE,
 75            "@@": TokenType.DAT,
 76            "@>": TokenType.AT_GT,
 77            "<@": TokenType.LT_AT,
 78            "?&": TokenType.QMARK_AMP,
 79            "?|": TokenType.QMARK_PIPE,
 80            "#-": TokenType.HASH_DASH,
 81            "|/": TokenType.PIPE_SLASH,
 82            "||/": TokenType.DPIPE_SLASH,
 83            "BEGIN": TokenType.BEGIN,
 84            "BIGSERIAL": TokenType.BIGSERIAL,
 85            "CSTRING": TokenType.PSEUDO_TYPE,
 86            "DECLARE": TokenType.COMMAND,
 87            "DO": TokenType.COMMAND,
 88            "EXEC": TokenType.COMMAND,
 89            "HSTORE": TokenType.HSTORE,
 90            "INT8": TokenType.BIGINT,
 91            "MONEY": TokenType.MONEY,
 92            "NAME": TokenType.NAME,
 93            "OID": TokenType.OBJECT_IDENTIFIER,
 94            "ONLY": TokenType.ONLY,
 95            "POINT": TokenType.POINT,
 96            "REFRESH": TokenType.COMMAND,
 97            "REINDEX": TokenType.COMMAND,
 98            "RESET": TokenType.COMMAND,
 99            "SERIAL": TokenType.SERIAL,
100            "SMALLSERIAL": TokenType.SMALLSERIAL,
101            "TEMP": TokenType.TEMPORARY,
102            "TYPE": TokenType.TYPE,
103            "REGCLASS": TokenType.OBJECT_IDENTIFIER,
104            "REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
105            "REGCONFIG": TokenType.OBJECT_IDENTIFIER,
106            "REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
107            "REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
108            "REGOPER": TokenType.OBJECT_IDENTIFIER,
109            "REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
110            "REGPROC": TokenType.OBJECT_IDENTIFIER,
111            "REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
112            "REGROLE": TokenType.OBJECT_IDENTIFIER,
113            "REGTYPE": TokenType.OBJECT_IDENTIFIER,
114            "FLOAT": TokenType.DOUBLE,
115            "XML": TokenType.XML,
116            "VARIADIC": TokenType.VARIADIC,
117            "INOUT": TokenType.INOUT,
118        }
119        KEYWORDS.pop("/*+")
120        KEYWORDS.pop("DIV")
121
122        SINGLE_TOKENS = {
123            **tokens.Tokenizer.SINGLE_TOKENS,
124            "$": TokenType.HEREDOC_STRING,
125        }
126
127        VAR_SINGLE_TOKENS = {"$"}
BIT_STRINGS = [("b'", "'"), ("B'", "'")]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
BYTE_STRING_ESCAPES = ["'", '\\']
HEREDOC_STRINGS = ['$']
HEREDOC_TAG_IS_IDENTIFIER = True
HEREDOC_STRING_ALTERNATIVE = <TokenType.PARAMETER: 56>
KEYWORDS = {'{%': <TokenType.BLOCK_START: 71>, '{%+': <TokenType.BLOCK_START: 71>, '{%-': <TokenType.BLOCK_START: 71>, '%}': <TokenType.BLOCK_END: 72>, '+%}': <TokenType.BLOCK_END: 72>, '-%}': <TokenType.BLOCK_END: 72>, '{{+': <TokenType.BLOCK_START: 71>, '{{-': <TokenType.BLOCK_START: 71>, '+}}': <TokenType.BLOCK_END: 72>, '-}}': <TokenType.BLOCK_END: 72>, '&<': <TokenType.AMP_LT: 61>, '&>': <TokenType.AMP_GT: 62>, '==': <TokenType.EQ: 28>, '::': <TokenType.DCOLON: 14>, '?::': <TokenType.QDCOLON: 367>, '||': <TokenType.DPIPE: 37>, '|>': <TokenType.PIPE_GT: 38>, '>=': <TokenType.GTE: 26>, '<=': <TokenType.LTE: 24>, '<>': <TokenType.NEQ: 29>, '!=': <TokenType.NEQ: 29>, ':=': <TokenType.COLON_EQ: 31>, '<=>': <TokenType.NULLSAFE_EQ: 30>, '->': <TokenType.ARROW: 45>, '->>': <TokenType.DARROW: 46>, '=>': <TokenType.FARROW: 47>, '#>': <TokenType.HASH_ARROW: 49>, '#>>': <TokenType.DHASH_ARROW: 50>, '<->': <TokenType.LR_ARROW: 51>, '&&': <TokenType.DAMP: 60>, '??': <TokenType.DQMARK: 18>, '~~~': <TokenType.GLOB: 285>, '~~': <TokenType.LIKE: 316>, '~~*': <TokenType.ILIKE: 293>, '~*': <TokenType.IRLIKE: 305>, '-|-': <TokenType.ADJACENT: 63>, 'ALL': <TokenType.ALL: 218>, 'AND': <TokenType.AND: 34>, 'ANTI': <TokenType.ANTI: 219>, 'ANY': <TokenType.ANY: 220>, 'ASC': <TokenType.ASC: 223>, 'AS': <TokenType.ALIAS: 216>, 'ASOF': <TokenType.ASOF: 224>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 226>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 226>, 'BEGIN': <TokenType.BEGIN: 227>, 'BETWEEN': <TokenType.BETWEEN: 228>, 'CACHE': <TokenType.CACHE: 230>, 'UNCACHE': <TokenType.UNCACHE: 411>, 'CASE': <TokenType.CASE: 231>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 232>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 233>, 'COLLATE': <TokenType.COLLATE: 234>, 'COLUMN': <TokenType.COLUMN: 79>, 'COMMIT': <TokenType.COMMIT: 237>, 'CONNECT BY': <TokenType.CONNECT_BY: 238>, 'CONSTRAINT': <TokenType.CONSTRAINT: 239>, 'COPY': <TokenType.COPY: 240>, 'CREATE': <TokenType.CREATE: 241>, 'CROSS': 
<TokenType.CROSS: 242>, 'CUBE': <TokenType.CUBE: 243>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 244>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 246>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 247>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 248>, 'CURRENT_USER': <TokenType.CURRENT_USER: 249>, 'CURRENT_CATALOG': <TokenType.CURRENT_CATALOG: 252>, 'DATABASE': <TokenType.DATABASE: 78>, 'DEFAULT': <TokenType.DEFAULT: 254>, 'DELETE': <TokenType.DELETE: 255>, 'DESC': <TokenType.DESC: 256>, 'DESCRIBE': <TokenType.DESCRIBE: 257>, 'DISTINCT': <TokenType.DISTINCT: 260>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 261>, 'DROP': <TokenType.DROP: 263>, 'ELSE': <TokenType.ELSE: 264>, 'END': <TokenType.END: 265>, 'ENUM': <TokenType.ENUM: 203>, 'ESCAPE': <TokenType.ESCAPE: 266>, 'EXCEPT': <TokenType.EXCEPT: 267>, 'EXECUTE': <TokenType.EXECUTE: 268>, 'EXISTS': <TokenType.EXISTS: 269>, 'FALSE': <TokenType.FALSE: 270>, 'FETCH': <TokenType.FETCH: 271>, 'FILTER': <TokenType.FILTER: 274>, 'FILE': <TokenType.FILE: 272>, 'FIRST': <TokenType.FIRST: 276>, 'FULL': <TokenType.FULL: 282>, 'FUNCTION': <TokenType.FUNCTION: 283>, 'FOR': <TokenType.FOR: 277>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 279>, 'FORMAT': <TokenType.FORMAT: 280>, 'FROM': <TokenType.FROM: 281>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 170>, 'GEOMETRY': <TokenType.GEOMETRY: 173>, 'GLOB': <TokenType.GLOB: 285>, 'GROUP BY': <TokenType.GROUP_BY: 288>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 289>, 'HAVING': <TokenType.HAVING: 290>, 'ILIKE': <TokenType.ILIKE: 293>, 'IN': <TokenType.IN: 294>, 'INDEX': <TokenType.INDEX: 295>, 'INET': <TokenType.INET: 198>, 'INNER': <TokenType.INNER: 297>, 'INSERT': <TokenType.INSERT: 298>, 'INTERVAL': <TokenType.INTERVAL: 302>, 'INTERSECT': <TokenType.INTERSECT: 301>, 'INTO': <TokenType.INTO: 303>, 'IS': <TokenType.IS: 306>, 'ISNULL': <TokenType.ISNULL: 307>, 'JOIN': <TokenType.JOIN: 308>, 'KEEP': <TokenType.KEEP: 310>, 'KILL': <TokenType.KILL: 312>, 'LATERAL': <TokenType.LATERAL: 314>, 
'LEFT': <TokenType.LEFT: 315>, 'LIKE': <TokenType.LIKE: 316>, 'LIMIT': <TokenType.LIMIT: 317>, 'LOAD': <TokenType.LOAD: 319>, 'LOCALTIME': <TokenType.LOCALTIME: 177>, 'LOCALTIMESTAMP': <TokenType.LOCALTIMESTAMP: 178>, 'LOCK': <TokenType.LOCK: 320>, 'MERGE': <TokenType.MERGE: 326>, 'NAMESPACE': <TokenType.NAMESPACE: 438>, 'NATURAL': <TokenType.NATURAL: 329>, 'NEXT': <TokenType.NEXT: 330>, 'NOT': <TokenType.NOT: 27>, 'NOTNULL': <TokenType.NOTNULL: 332>, 'NULL': <TokenType.NULL: 333>, 'OBJECT': <TokenType.OBJECT: 197>, 'OFFSET': <TokenType.OFFSET: 335>, 'ON': <TokenType.ON: 336>, 'OR': <TokenType.OR: 35>, 'XOR': <TokenType.XOR: 64>, 'ORDER BY': <TokenType.ORDER_BY: 339>, 'ORDINALITY': <TokenType.ORDINALITY: 342>, 'OUT': <TokenType.OUT: 343>, 'OUTER': <TokenType.OUTER: 345>, 'OVER': <TokenType.OVER: 346>, 'OVERLAPS': <TokenType.OVERLAPS: 347>, 'OVERWRITE': <TokenType.OVERWRITE: 348>, 'PARTITION': <TokenType.PARTITION: 350>, 'PARTITION BY': <TokenType.PARTITION_BY: 351>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 351>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 351>, 'PERCENT': <TokenType.PERCENT: 352>, 'PIVOT': <TokenType.PIVOT: 353>, 'PRAGMA': <TokenType.PRAGMA: 358>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 360>, 'PROCEDURE': <TokenType.PROCEDURE: 361>, 'OPERATOR': <TokenType.OPERATOR: 338>, 'QUALIFY': <TokenType.QUALIFY: 365>, 'RANGE': <TokenType.RANGE: 368>, 'RECURSIVE': <TokenType.RECURSIVE: 369>, 'REGEXP': <TokenType.RLIKE: 377>, 'RENAME': <TokenType.RENAME: 371>, 'REPLACE': <TokenType.REPLACE: 372>, 'RETURNING': <TokenType.RETURNING: 373>, 'REFERENCES': <TokenType.REFERENCES: 375>, 'RIGHT': <TokenType.RIGHT: 376>, 'RLIKE': <TokenType.RLIKE: 377>, 'ROLLBACK': <TokenType.ROLLBACK: 379>, 'ROLLUP': <TokenType.ROLLUP: 380>, 'ROW': <TokenType.ROW: 381>, 'ROWS': <TokenType.ROWS: 382>, 'SCHEMA': <TokenType.SCHEMA: 81>, 'SELECT': <TokenType.SELECT: 384>, 'SEMI': <TokenType.SEMI: 385>, 'SESSION': <TokenType.SESSION: 57>, 'SESSION_USER': <TokenType.SESSION_USER: 59>, 'SET': 
<TokenType.SET: 389>, 'SETTINGS': <TokenType.SETTINGS: 390>, 'SHOW': <TokenType.SHOW: 391>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 392>, 'SOME': <TokenType.SOME: 393>, 'SORT BY': <TokenType.SORT_BY: 394>, 'SQL SECURITY': <TokenType.SQL_SECURITY: 396>, 'START WITH': <TokenType.START_WITH: 397>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 399>, 'TABLE': <TokenType.TABLE: 82>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 402>, 'TEMP': <TokenType.TEMPORARY: 404>, 'TEMPORARY': <TokenType.TEMPORARY: 404>, 'THEN': <TokenType.THEN: 406>, 'TRUE': <TokenType.TRUE: 407>, 'TRUNCATE': <TokenType.TRUNCATE: 408>, 'TRIGGER': <TokenType.TRIGGER: 409>, 'UNION': <TokenType.UNION: 412>, 'UNKNOWN': <TokenType.UNKNOWN: 212>, 'UNNEST': <TokenType.UNNEST: 413>, 'UNPIVOT': <TokenType.UNPIVOT: 414>, 'UPDATE': <TokenType.UPDATE: 415>, 'USE': <TokenType.USE: 416>, 'USING': <TokenType.USING: 417>, 'UUID': <TokenType.UUID: 169>, 'VALUES': <TokenType.VALUES: 418>, 'VIEW': <TokenType.VIEW: 420>, 'VOLATILE': <TokenType.VOLATILE: 422>, 'WHEN': <TokenType.WHEN: 424>, 'WHERE': <TokenType.WHERE: 425>, 'WINDOW': <TokenType.WINDOW: 426>, 'WITH': <TokenType.WITH: 427>, 'APPLY': <TokenType.APPLY: 221>, 'ARRAY': <TokenType.ARRAY: 222>, 'BIT': <TokenType.BIT: 95>, 'BOOL': <TokenType.BOOLEAN: 96>, 'BOOLEAN': <TokenType.BOOLEAN: 96>, 'BYTE': <TokenType.TINYINT: 97>, 'MEDIUMINT': <TokenType.MEDIUMINT: 101>, 'INT1': <TokenType.TINYINT: 97>, 'TINYINT': <TokenType.TINYINT: 97>, 'INT16': <TokenType.SMALLINT: 99>, 'SHORT': <TokenType.SMALLINT: 99>, 'SMALLINT': <TokenType.SMALLINT: 99>, 'HUGEINT': <TokenType.INT128: 108>, 'UHUGEINT': <TokenType.UINT128: 109>, 'INT2': <TokenType.SMALLINT: 99>, 'INTEGER': <TokenType.INT: 103>, 'INT': <TokenType.INT: 103>, 'INT4': <TokenType.INT: 103>, 'INT32': <TokenType.INT: 103>, 'INT64': <TokenType.BIGINT: 105>, 'INT128': <TokenType.INT128: 108>, 'INT256': <TokenType.INT256: 110>, 'LONG': <TokenType.BIGINT: 105>, 'BIGINT': <TokenType.BIGINT: 105>, 'INT8': <TokenType.BIGINT: 105>, 'UINT': 
<TokenType.UINT: 104>, 'UINT128': <TokenType.UINT128: 109>, 'UINT256': <TokenType.UINT256: 111>, 'DEC': <TokenType.DECIMAL: 115>, 'DECIMAL': <TokenType.DECIMAL: 115>, 'DECIMAL32': <TokenType.DECIMAL32: 116>, 'DECIMAL64': <TokenType.DECIMAL64: 117>, 'DECIMAL128': <TokenType.DECIMAL128: 118>, 'DECIMAL256': <TokenType.DECIMAL256: 119>, 'DECFLOAT': <TokenType.DECFLOAT: 120>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 122>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 122>, 'BIGNUM': <TokenType.BIGNUM: 107>, 'LIST': <TokenType.LIST: 318>, 'MAP': <TokenType.MAP: 321>, 'NULLABLE': <TokenType.NULLABLE: 172>, 'NUMBER': <TokenType.DECIMAL: 115>, 'NUMERIC': <TokenType.DECIMAL: 115>, 'FIXED': <TokenType.DECIMAL: 115>, 'REAL': <TokenType.FLOAT: 112>, 'FLOAT': <TokenType.DOUBLE: 113>, 'FLOAT4': <TokenType.FLOAT: 112>, 'FLOAT8': <TokenType.DOUBLE: 113>, 'DOUBLE': <TokenType.DOUBLE: 113>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 113>, 'JSON': <TokenType.JSON: 139>, 'JSONB': <TokenType.JSONB: 140>, 'CHAR': <TokenType.CHAR: 123>, 'CHARACTER': <TokenType.CHAR: 123>, 'CHAR VARYING': <TokenType.VARCHAR: 125>, 'CHARACTER VARYING': <TokenType.VARCHAR: 125>, 'NCHAR': <TokenType.NCHAR: 124>, 'VARCHAR': <TokenType.VARCHAR: 125>, 'VARCHAR2': <TokenType.VARCHAR: 125>, 'NVARCHAR': <TokenType.NVARCHAR: 126>, 'NVARCHAR2': <TokenType.NVARCHAR: 126>, 'BPCHAR': <TokenType.BPCHAR: 127>, 'STR': <TokenType.TEXT: 128>, 'STRING': <TokenType.TEXT: 128>, 'TEXT': <TokenType.TEXT: 128>, 'LONGTEXT': <TokenType.LONGTEXT: 130>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 129>, 'TINYTEXT': <TokenType.TINYTEXT: 135>, 'CLOB': <TokenType.TEXT: 128>, 'LONGVARCHAR': <TokenType.TEXT: 128>, 'BINARY': <TokenType.BINARY: 137>, 'BLOB': <TokenType.VARBINARY: 138>, 'LONGBLOB': <TokenType.LONGBLOB: 133>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 132>, 'TINYBLOB': <TokenType.TINYBLOB: 134>, 'BYTEA': <TokenType.VARBINARY: 138>, 'VARBINARY': <TokenType.VARBINARY: 138>, 'TIME': <TokenType.TIME: 141>, 'TIMETZ': <TokenType.TIMETZ: 142>, 'TIME_NS': 
<TokenType.TIME_NS: 143>, 'TIMESTAMP': <TokenType.TIMESTAMP: 144>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 145>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 146>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 146>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 147>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 147>, 'DATE': <TokenType.DATE: 155>, 'DATETIME': <TokenType.DATETIME: 151>, 'INT4RANGE': <TokenType.INT4RANGE: 157>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 158>, 'INT8RANGE': <TokenType.INT8RANGE: 159>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 160>, 'NUMRANGE': <TokenType.NUMRANGE: 161>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 162>, 'TSRANGE': <TokenType.TSRANGE: 163>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 164>, 'TSTZRANGE': <TokenType.TSTZRANGE: 165>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 166>, 'DATERANGE': <TokenType.DATERANGE: 167>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 168>, 'UNIQUE': <TokenType.UNIQUE: 428>, 'VECTOR': <TokenType.VECTOR: 213>, 'STRUCT': <TokenType.STRUCT: 400>, 'SEQUENCE': <TokenType.SEQUENCE: 387>, 'VARIANT': <TokenType.VARIANT: 196>, 'ALTER': <TokenType.ALTER: 217>, 'ANALYZE': <TokenType.ANALYZE: 437>, 'CALL': <TokenType.COMMAND: 235>, 'COMMENT': <TokenType.COMMENT: 236>, 'EXPLAIN': <TokenType.COMMAND: 235>, 'GRANT': <TokenType.GRANT: 287>, 'REVOKE': <TokenType.REVOKE: 374>, 'OPTIMIZE': <TokenType.COMMAND: 235>, 'PREPARE': <TokenType.COMMAND: 235>, 'VACUUM': <TokenType.COMMAND: 235>, 'USER-DEFINED': <TokenType.USERDEFINED: 191>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 432>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 433>, '~': <TokenType.RLIKE: 377>, '@@': <TokenType.DAT: 52>, '@>': <TokenType.AT_GT: 54>, '<@': <TokenType.LT_AT: 53>, '?&': <TokenType.QMARK_AMP: 66>, '?|': <TokenType.QMARK_PIPE: 67>, '#-': <TokenType.HASH_DASH: 68>, '|/': <TokenType.PIPE_SLASH: 40>, '||/': <TokenType.DPIPE_SLASH: 41>, 'BIGSERIAL': <TokenType.BIGSERIAL: 188>, 'CSTRING': <TokenType.PSEUDO_TYPE: 363>, 'DECLARE': <TokenType.COMMAND: 
235>, 'DO': <TokenType.COMMAND: 235>, 'EXEC': <TokenType.COMMAND: 235>, 'HSTORE': <TokenType.HSTORE: 184>, 'MONEY': <TokenType.MONEY: 192>, 'NAME': <TokenType.NAME: 136>, 'OID': <TokenType.OBJECT_IDENTIFIER: 334>, 'ONLY': <TokenType.ONLY: 337>, 'POINT': <TokenType.POINT: 174>, 'REFRESH': <TokenType.COMMAND: 235>, 'REINDEX': <TokenType.COMMAND: 235>, 'RESET': <TokenType.COMMAND: 235>, 'SERIAL': <TokenType.SERIAL: 186>, 'SMALLSERIAL': <TokenType.SMALLSERIAL: 187>, 'TYPE': <TokenType.TYPE: 410>, 'REGCLASS': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGCOLLATION': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGCONFIG': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGDICTIONARY': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGNAMESPACE': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGOPER': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGOPERATOR': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGPROC': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGPROCEDURE': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGROLE': <TokenType.OBJECT_IDENTIFIER: 334>, 'REGTYPE': <TokenType.OBJECT_IDENTIFIER: 334>, 'XML': <TokenType.XML: 189>, 'VARIADIC': <TokenType.VARIADIC: 419>, 'INOUT': <TokenType.INOUT: 344>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 1>, ')': <TokenType.R_PAREN: 2>, '[': <TokenType.L_BRACKET: 3>, ']': <TokenType.R_BRACKET: 4>, '{': <TokenType.L_BRACE: 5>, '}': <TokenType.R_BRACE: 6>, '&': <TokenType.AMP: 36>, '^': <TokenType.CARET: 42>, ':': <TokenType.COLON: 11>, ',': <TokenType.COMMA: 7>, '.': <TokenType.DOT: 8>, '-': <TokenType.DASH: 9>, '=': <TokenType.EQ: 28>, '>': <TokenType.GT: 25>, '<': <TokenType.LT: 23>, '%': <TokenType.MOD: 327>, '!': <TokenType.NOT: 27>, '|': <TokenType.PIPE: 39>, '+': <TokenType.PLUS: 10>, ';': <TokenType.SEMICOLON: 19>, '/': <TokenType.SLASH: 22>, '\\': <TokenType.BACKSLASH: 21>, '*': <TokenType.STAR: 20>, '~': <TokenType.TILDE: 44>, '?': <TokenType.PLACEHOLDER: 354>, '@': <TokenType.PARAMETER: 56>, '#': <TokenType.HASH: 48>, "'": <TokenType.UNKNOWN: 212>, '`': <TokenType.UNKNOWN: 212>, '"': <TokenType.UNKNOWN: 212>, '$': <TokenType.HEREDOC_STRING: 93>}
VAR_SINGLE_TOKENS = {'$'}