Edit on GitHub

sqlglot.dialects.drill

 1from __future__ import annotations
 2
 3
 4from sqlglot import tokens
 5from sqlglot.dialects.dialect import Dialect
 6from sqlglot.generators.drill import DrillGenerator
 7from sqlglot.parsers.drill import DrillParser
 8
 9
10class Drill(Dialect):
11    NORMALIZE_FUNCTIONS: bool | str = False
12    PRESERVE_ORIGINAL_NAMES = True
13    NULL_ORDERING = "nulls_are_last"
14    DATE_FORMAT = "'yyyy-MM-dd'"
15    DATEINT_FORMAT = "'yyyyMMdd'"
16    TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'"
17    SUPPORTS_USER_DEFINED_TYPES = False
18    TYPED_DIVISION = True
19    CONCAT_COALESCE = True
20
21    TIME_MAPPING = {
22        "y": "%Y",
23        "Y": "%Y",
24        "YYYY": "%Y",
25        "yyyy": "%Y",
26        "YY": "%y",
27        "yy": "%y",
28        "MMMM": "%B",
29        "MMM": "%b",
30        "MM": "%m",
31        "M": "%-m",
32        "dd": "%d",
33        "d": "%-d",
34        "HH": "%H",
35        "H": "%-H",
36        "hh": "%I",
37        "h": "%-I",
38        "mm": "%M",
39        "m": "%-M",
40        "ss": "%S",
41        "s": "%-S",
42        "SSSSSS": "%f",
43        "a": "%p",
44        "DD": "%j",
45        "D": "%-j",
46        "E": "%a",
47        "EE": "%a",
48        "EEE": "%a",
49        "EEEE": "%A",
50        "''T''": "T",
51    }
52
53    class Tokenizer(tokens.Tokenizer):
54        IDENTIFIERS = ["`"]
55        STRING_ESCAPES = ["\\"]
56
57        KEYWORDS = tokens.Tokenizer.KEYWORDS.copy()
58        KEYWORDS.pop("/*+")
59
60    Parser = DrillParser
61
62    Generator = DrillGenerator
class Drill(Dialect):
    """SQLGlot dialect definition for Apache Drill."""

    # Function names are left exactly as written (no case normalization).
    NORMALIZE_FUNCTIONS: bool | str = False
    # Keep the originally-written function name in the node's metadata
    # (useful for roundtripping functions that share an AST node).
    PRESERVE_ORIGINAL_NAMES = True
    # NULLs sort last when no explicit NULL ordering is given.
    NULL_ORDERING = "nulls_are_last"
    # Default date/time format literals for this dialect (note the
    # embedded single quotes: these are SQL string literals).
    DATE_FORMAT = "'yyyy-MM-dd'"
    DATEINT_FORMAT = "'yyyyMMdd'"
    TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'"
    # User-defined data types are not supported.
    SUPPORTS_USER_DEFINED_TYPES = False
    # a / b is integer division when both operands are integers.
    TYPED_DIVISION = True
    # A NULL argument to CONCAT is coalesced to an empty string
    # instead of making the whole result NULL.
    CONCAT_COALESCE = True

    # Maps this dialect's time-format tokens to the equivalent Python
    # strftime codes (e.g. "yyyy" -> "%Y").
    TIME_MAPPING = {
        "y": "%Y",
        "Y": "%Y",
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "MMM": "%b",
        "MM": "%m",
        "M": "%-m",
        "dd": "%d",
        "d": "%-d",
        "HH": "%H",
        "H": "%-H",
        "hh": "%I",
        "h": "%-I",
        "mm": "%M",
        "m": "%-M",
        "ss": "%S",
        "s": "%-S",
        "SSSSSS": "%f",
        "a": "%p",
        "DD": "%j",
        "D": "%-j",
        "E": "%a",
        "EE": "%a",
        "EEE": "%a",
        "EEEE": "%A",
        "''T''": "T",
    }

    class Tokenizer(tokens.Tokenizer):
        # Drill quotes identifiers with backticks.
        IDENTIFIERS = ["`"]
        # Backslash escapes inside string literals.
        STRING_ESCAPES = ["\\"]

        # Start from the base keyword table but drop the "/*+" token
        # (hint-comment opener) inherited from the base tokenizer.
        KEYWORDS = tokens.Tokenizer.KEYWORDS.copy()
        KEYWORDS.pop("/*+")

    Parser = DrillParser

    Generator = DrillGenerator
NORMALIZE_FUNCTIONS: bool | str = False

Determines how function names are going to be normalized.

Possible values:

- "upper" or True: convert function names to uppercase.
- "lower": convert function names to lowercase.
- False: disable function name normalization.

PRESERVE_ORIGINAL_NAMES = True

Whether the name of the function should be preserved inside the node's metadata. This can be useful for roundtripping deprecated vs. new functions that share an AST node, e.g. JSON_VALUE vs. JSON_EXTRACT_SCALAR in BigQuery.

NULL_ORDERING = 'nulls_are_last'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

DATE_FORMAT = "'yyyy-MM-dd'"
DATEINT_FORMAT = "'yyyyMMdd'"
TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

TYPED_DIVISION = True

Whether the behavior of a / b depends on the types of a and b. False means a / b is always float division. True means a / b is integer division if both a and b are integers.

CONCAT_COALESCE = True

A NULL arg in CONCAT yields NULL by default, but in some dialects it yields an empty string.

TIME_MAPPING = {'y': '%Y', 'Y': '%Y', 'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'MMM': '%b', 'MM': '%m', 'M': '%-m', 'dd': '%d', 'd': '%-d', 'HH': '%H', 'H': '%-H', 'hh': '%I', 'h': '%-I', 'mm': '%M', 'm': '%-M', 'ss': '%S', 's': '%-S', 'SSSSSS': '%f', 'a': '%p', 'DD': '%j', 'D': '%-j', 'E': '%a', 'EE': '%a', 'EEE': '%a', 'EEEE': '%A', "''T''": 'T'}

Associates this dialect's time formats with their equivalent Python strftime formats.

SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two-character sequence \n) to its unescaped version (e.g. a literal newline character).

STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = True

Whether string literals support escape sequences (e.g. \n). Set by the metaclass based on the tokenizer's STRING_ESCAPES.

BYTE_STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = True

Whether byte string literals support escape sequences. Set by the metaclass based on the tokenizer's BYTE_STRING_ESCAPES.

INITCAP_SUPPORTS_CUSTOM_DELIMITERS = False
tokenizer_class = <class 'Drill.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.dialects.dialect.JSONPathTokenizer'>
parser_class = <class 'sqlglot.parsers.drill.DrillParser'>
generator_class = <class 'sqlglot.generators.drill.DrillGenerator'>
TIME_TRIE: dict = {'y': {0: True, 'y': {'y': {'y': {0: True}}, 0: True}}, 'Y': {0: True, 'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}, 0: True}, 0: True}, 0: True}, 'd': {'d': {0: True}, 0: True}, 'H': {'H': {0: True}, 0: True}, 'h': {'h': {0: True}, 0: True}, 'm': {'m': {0: True}, 0: True}, 's': {'s': {0: True}, 0: True}, 'S': {'S': {'S': {'S': {'S': {'S': {0: True}}}}}}, 'a': {0: True}, 'D': {'D': {0: True}, 0: True}, 'E': {0: True, 'E': {0: True, 'E': {0: True, 'E': {0: True}}}}, "'": {"'": {'T': {"'": {"'": {0: True}}}}}}
FORMAT_TRIE: dict = {'y': {0: True, 'y': {'y': {'y': {0: True}}, 0: True}}, 'Y': {0: True, 'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}, 0: True}, 0: True}, 0: True}, 'd': {'d': {0: True}, 0: True}, 'H': {'H': {0: True}, 0: True}, 'h': {'h': {0: True}, 0: True}, 'm': {'m': {0: True}, 0: True}, 's': {'s': {0: True}, 0: True}, 'S': {'S': {'S': {'S': {'S': {'S': {0: True}}}}}}, 'a': {0: True}, 'D': {'D': {0: True}, 0: True}, 'E': {0: True, 'E': {0: True, 'E': {0: True, 'E': {0: True}}}}, "'": {"'": {'T': {"'": {"'": {0: True}}}}}}
INVERSE_TIME_MAPPING: dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'MMMM', '%b': 'MMM', '%m': 'MM', '%-m': 'M', '%d': 'dd', '%-d': 'd', '%H': 'HH', '%-H': 'H', '%I': 'hh', '%-I': 'h', '%M': 'mm', '%-M': 'm', '%S': 'ss', '%-S': 's', '%f': 'SSSSSS', '%p': 'a', '%j': 'DD', '%-j': 'D', '%a': 'EEE', '%A': 'EEEE', 'T': "''T''"}
INVERSE_TIME_TRIE: dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, '-': {'m': {0: True}, 'd': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'j': {0: True}}, 'd': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}, 'p': {0: True}, 'j': {0: True}, 'a': {0: True}, 'A': {0: True}}, 'T': {0: True}}
INVERSE_FORMAT_MAPPING: dict[str, str] = {}
INVERSE_FORMAT_TRIE: dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
VALID_INTERVAL_UNITS: set[str] = {'QTRS', 'TZH', 'HR', 'DAYS', 'US', 'NSEC', 'MICROSECS', 'WK', 'NSECOND', 'MSECONDS', 'WOY', 'MINUTE', 'WEEKDAY', 'MILLISEC', 'MILLISECS', 'MONS', 'MONTHS', 'NSECONDS', 'USECS', 'Y', 'YEAR', 'DAYOFWEEK', 'SECONDS', 'QTR', 'YYYY', 'MILLISECONDS', 'DECADE', 'MM', 'MSECS', 'HOUR', 'DAYOFWEEK_ISO', 'MICROSEC', 'CENTS', 'MS', 'DAYOFWEEKISO', 'MICROSECONDS', 'MILLISECOND', 'USECONDS', 'SECS', 'EPOCH_NANOSECONDS', 'QUARTER', 'USEC', 'HH', 'YRS', 'WEEK', 'EPOCH_MICROSECOND', 'DECADES', 'WEEKOFYEAR', 'WY', 'NANOSECOND', 'DOW', 'SECOND', 'SEC', 'MIL', 'CENTURY', 'HRS', 'DAYOFYEAR', 'YY', 'NANOSEC', 'NANOSECS', 'M', 'WEEKOFYEARISO', 'YEARS', 'Q', 'MILLENNIUM', 'WEEKISO', 'DAY OF WEEK', 'WEEK_ISO', 'MILS', 'MINS', 'NS', 'W', 'EPOCH', 'D', 'DAYOFMONTH', 'DAY', 'MILLISECON', 'CENTURIES', 'S', 'QUARTERS', 'WEEKDAY_ISO', 'WEEKOFYEAR_ISO', 'DEC', 'DW_ISO', 'DY', 'C', 'MONTH', 'YR', 'DW', 'YYY', 'EPOCH_SECOND', 'MIN', 'DD', 'DOW_ISO', 'MI', 'EPOCH_MICROSECONDS', 'MSEC', 'DAY OF YEAR', 'TZM', 'MINUTES', 'MON', 'MSECOND', 'DOY', 'EPOCH_MILLISECONDS', 'MILLENIA', 'EPOCH_NANOSECOND', 'EPOCH_MILLISECOND', 'MICROSECOND', 'EPOCH_SECONDS', 'TIMEZONE_HOUR', 'H', 'HOURS', 'TIMEZONE_MINUTE', 'CENT', 'DECS', 'USECOND'}
BIT_START: str | None = None
BIT_END: str | None = None
HEX_START: str | None = None
HEX_END: str | None = None
BYTE_START: str | None = None
BYTE_END: str | None = None
UNICODE_START: str | None = None
UNICODE_END: str | None = None
class Drill.Tokenizer(sqlglot.tokens.Tokenizer):
54    class Tokenizer(tokens.Tokenizer):
55        IDENTIFIERS = ["`"]
56        STRING_ESCAPES = ["\\"]
57
58        KEYWORDS = tokens.Tokenizer.KEYWORDS.copy()
59        KEYWORDS.pop("/*+")
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
KEYWORDS = {'{%': <TokenType.BLOCK_START: 71>, '{%+': <TokenType.BLOCK_START: 71>, '{%-': <TokenType.BLOCK_START: 71>, '%}': <TokenType.BLOCK_END: 72>, '+%}': <TokenType.BLOCK_END: 72>, '-%}': <TokenType.BLOCK_END: 72>, '{{+': <TokenType.BLOCK_START: 71>, '{{-': <TokenType.BLOCK_START: 71>, '+}}': <TokenType.BLOCK_END: 72>, '-}}': <TokenType.BLOCK_END: 72>, '&<': <TokenType.AMP_LT: 61>, '&>': <TokenType.AMP_GT: 62>, '==': <TokenType.EQ: 28>, '::': <TokenType.DCOLON: 14>, '?::': <TokenType.QDCOLON: 366>, '||': <TokenType.DPIPE: 37>, '|>': <TokenType.PIPE_GT: 38>, '>=': <TokenType.GTE: 26>, '<=': <TokenType.LTE: 24>, '<>': <TokenType.NEQ: 29>, '!=': <TokenType.NEQ: 29>, ':=': <TokenType.COLON_EQ: 31>, '<=>': <TokenType.NULLSAFE_EQ: 30>, '->': <TokenType.ARROW: 45>, '->>': <TokenType.DARROW: 46>, '=>': <TokenType.FARROW: 47>, '#>': <TokenType.HASH_ARROW: 49>, '#>>': <TokenType.DHASH_ARROW: 50>, '<->': <TokenType.LR_ARROW: 51>, '&&': <TokenType.DAMP: 60>, '??': <TokenType.DQMARK: 18>, '~~~': <TokenType.GLOB: 284>, '~~': <TokenType.LIKE: 315>, '~~*': <TokenType.ILIKE: 292>, '~*': <TokenType.IRLIKE: 304>, '-|-': <TokenType.ADJACENT: 63>, 'ALL': <TokenType.ALL: 218>, 'AND': <TokenType.AND: 34>, 'ANTI': <TokenType.ANTI: 219>, 'ANY': <TokenType.ANY: 220>, 'ASC': <TokenType.ASC: 223>, 'AS': <TokenType.ALIAS: 216>, 'ASOF': <TokenType.ASOF: 224>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 226>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 226>, 'BEGIN': <TokenType.BEGIN: 227>, 'BETWEEN': <TokenType.BETWEEN: 228>, 'CACHE': <TokenType.CACHE: 230>, 'UNCACHE': <TokenType.UNCACHE: 409>, 'CASE': <TokenType.CASE: 231>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 232>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 233>, 'COLLATE': <TokenType.COLLATE: 234>, 'COLUMN': <TokenType.COLUMN: 79>, 'COMMIT': <TokenType.COMMIT: 237>, 'CONNECT BY': <TokenType.CONNECT_BY: 238>, 'CONSTRAINT': <TokenType.CONSTRAINT: 239>, 'COPY': <TokenType.COPY: 240>, 'CREATE': <TokenType.CREATE: 241>, 'CROSS': 
<TokenType.CROSS: 242>, 'CUBE': <TokenType.CUBE: 243>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 244>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 246>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 247>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 248>, 'CURRENT_USER': <TokenType.CURRENT_USER: 249>, 'CURRENT_CATALOG': <TokenType.CURRENT_CATALOG: 251>, 'DATABASE': <TokenType.DATABASE: 78>, 'DEFAULT': <TokenType.DEFAULT: 253>, 'DELETE': <TokenType.DELETE: 254>, 'DESC': <TokenType.DESC: 255>, 'DESCRIBE': <TokenType.DESCRIBE: 256>, 'DISTINCT': <TokenType.DISTINCT: 259>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 260>, 'DIV': <TokenType.DIV: 261>, 'DROP': <TokenType.DROP: 262>, 'ELSE': <TokenType.ELSE: 263>, 'END': <TokenType.END: 264>, 'ENUM': <TokenType.ENUM: 203>, 'ESCAPE': <TokenType.ESCAPE: 265>, 'EXCEPT': <TokenType.EXCEPT: 266>, 'EXECUTE': <TokenType.EXECUTE: 267>, 'EXISTS': <TokenType.EXISTS: 268>, 'FALSE': <TokenType.FALSE: 269>, 'FETCH': <TokenType.FETCH: 270>, 'FILTER': <TokenType.FILTER: 273>, 'FILE': <TokenType.FILE: 271>, 'FIRST': <TokenType.FIRST: 275>, 'FULL': <TokenType.FULL: 281>, 'FUNCTION': <TokenType.FUNCTION: 282>, 'FOR': <TokenType.FOR: 276>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 278>, 'FORMAT': <TokenType.FORMAT: 279>, 'FROM': <TokenType.FROM: 280>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 170>, 'GEOMETRY': <TokenType.GEOMETRY: 173>, 'GLOB': <TokenType.GLOB: 284>, 'GROUP BY': <TokenType.GROUP_BY: 287>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 288>, 'HAVING': <TokenType.HAVING: 289>, 'ILIKE': <TokenType.ILIKE: 292>, 'IN': <TokenType.IN: 293>, 'INDEX': <TokenType.INDEX: 294>, 'INET': <TokenType.INET: 198>, 'INNER': <TokenType.INNER: 296>, 'INSERT': <TokenType.INSERT: 297>, 'INTERVAL': <TokenType.INTERVAL: 301>, 'INTERSECT': <TokenType.INTERSECT: 300>, 'INTO': <TokenType.INTO: 302>, 'IS': <TokenType.IS: 305>, 'ISNULL': <TokenType.ISNULL: 306>, 'JOIN': <TokenType.JOIN: 307>, 'KEEP': <TokenType.KEEP: 309>, 'KILL': <TokenType.KILL: 311>, 'LATERAL': 
<TokenType.LATERAL: 313>, 'LEFT': <TokenType.LEFT: 314>, 'LIKE': <TokenType.LIKE: 315>, 'LIMIT': <TokenType.LIMIT: 316>, 'LOAD': <TokenType.LOAD: 318>, 'LOCALTIME': <TokenType.LOCALTIME: 177>, 'LOCALTIMESTAMP': <TokenType.LOCALTIMESTAMP: 178>, 'LOCK': <TokenType.LOCK: 319>, 'MERGE': <TokenType.MERGE: 325>, 'NAMESPACE': <TokenType.NAMESPACE: 436>, 'NATURAL': <TokenType.NATURAL: 328>, 'NEXT': <TokenType.NEXT: 329>, 'NOT': <TokenType.NOT: 27>, 'NOTNULL': <TokenType.NOTNULL: 331>, 'NULL': <TokenType.NULL: 332>, 'OBJECT': <TokenType.OBJECT: 197>, 'OFFSET': <TokenType.OFFSET: 334>, 'ON': <TokenType.ON: 335>, 'OR': <TokenType.OR: 35>, 'XOR': <TokenType.XOR: 64>, 'ORDER BY': <TokenType.ORDER_BY: 338>, 'ORDINALITY': <TokenType.ORDINALITY: 341>, 'OUT': <TokenType.OUT: 342>, 'OUTER': <TokenType.OUTER: 344>, 'OVER': <TokenType.OVER: 345>, 'OVERLAPS': <TokenType.OVERLAPS: 346>, 'OVERWRITE': <TokenType.OVERWRITE: 347>, 'PARTITION': <TokenType.PARTITION: 349>, 'PARTITION BY': <TokenType.PARTITION_BY: 350>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 350>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 350>, 'PERCENT': <TokenType.PERCENT: 351>, 'PIVOT': <TokenType.PIVOT: 352>, 'PRAGMA': <TokenType.PRAGMA: 357>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 359>, 'PROCEDURE': <TokenType.PROCEDURE: 360>, 'OPERATOR': <TokenType.OPERATOR: 337>, 'QUALIFY': <TokenType.QUALIFY: 364>, 'RANGE': <TokenType.RANGE: 367>, 'RECURSIVE': <TokenType.RECURSIVE: 368>, 'REGEXP': <TokenType.RLIKE: 376>, 'RENAME': <TokenType.RENAME: 370>, 'REPLACE': <TokenType.REPLACE: 371>, 'RETURNING': <TokenType.RETURNING: 372>, 'REFERENCES': <TokenType.REFERENCES: 374>, 'RIGHT': <TokenType.RIGHT: 375>, 'RLIKE': <TokenType.RLIKE: 376>, 'ROLLBACK': <TokenType.ROLLBACK: 378>, 'ROLLUP': <TokenType.ROLLUP: 379>, 'ROW': <TokenType.ROW: 380>, 'ROWS': <TokenType.ROWS: 381>, 'SCHEMA': <TokenType.SCHEMA: 81>, 'SELECT': <TokenType.SELECT: 383>, 'SEMI': <TokenType.SEMI: 384>, 'SESSION': <TokenType.SESSION: 57>, 'SESSION_USER': 
<TokenType.SESSION_USER: 59>, 'SET': <TokenType.SET: 388>, 'SETTINGS': <TokenType.SETTINGS: 389>, 'SHOW': <TokenType.SHOW: 390>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 391>, 'SOME': <TokenType.SOME: 392>, 'SORT BY': <TokenType.SORT_BY: 393>, 'SQL SECURITY': <TokenType.SQL_SECURITY: 395>, 'START WITH': <TokenType.START_WITH: 396>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 398>, 'TABLE': <TokenType.TABLE: 82>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 401>, 'TEMP': <TokenType.TEMPORARY: 403>, 'TEMPORARY': <TokenType.TEMPORARY: 403>, 'THEN': <TokenType.THEN: 405>, 'TRUE': <TokenType.TRUE: 406>, 'TRUNCATE': <TokenType.TRUNCATE: 407>, 'TRIGGER': <TokenType.TRIGGER: 408>, 'UNION': <TokenType.UNION: 410>, 'UNKNOWN': <TokenType.UNKNOWN: 212>, 'UNNEST': <TokenType.UNNEST: 411>, 'UNPIVOT': <TokenType.UNPIVOT: 412>, 'UPDATE': <TokenType.UPDATE: 413>, 'USE': <TokenType.USE: 414>, 'USING': <TokenType.USING: 415>, 'UUID': <TokenType.UUID: 169>, 'VALUES': <TokenType.VALUES: 416>, 'VIEW': <TokenType.VIEW: 418>, 'VOLATILE': <TokenType.VOLATILE: 420>, 'WHEN': <TokenType.WHEN: 422>, 'WHERE': <TokenType.WHERE: 423>, 'WINDOW': <TokenType.WINDOW: 424>, 'WITH': <TokenType.WITH: 425>, 'APPLY': <TokenType.APPLY: 221>, 'ARRAY': <TokenType.ARRAY: 222>, 'BIT': <TokenType.BIT: 95>, 'BOOL': <TokenType.BOOLEAN: 96>, 'BOOLEAN': <TokenType.BOOLEAN: 96>, 'BYTE': <TokenType.TINYINT: 97>, 'MEDIUMINT': <TokenType.MEDIUMINT: 101>, 'INT1': <TokenType.TINYINT: 97>, 'TINYINT': <TokenType.TINYINT: 97>, 'INT16': <TokenType.SMALLINT: 99>, 'SHORT': <TokenType.SMALLINT: 99>, 'SMALLINT': <TokenType.SMALLINT: 99>, 'HUGEINT': <TokenType.INT128: 108>, 'UHUGEINT': <TokenType.UINT128: 109>, 'INT2': <TokenType.SMALLINT: 99>, 'INTEGER': <TokenType.INT: 103>, 'INT': <TokenType.INT: 103>, 'INT4': <TokenType.INT: 103>, 'INT32': <TokenType.INT: 103>, 'INT64': <TokenType.BIGINT: 105>, 'INT128': <TokenType.INT128: 108>, 'INT256': <TokenType.INT256: 110>, 'LONG': <TokenType.BIGINT: 105>, 'BIGINT': <TokenType.BIGINT: 105>, 
'INT8': <TokenType.TINYINT: 97>, 'UINT': <TokenType.UINT: 104>, 'UINT128': <TokenType.UINT128: 109>, 'UINT256': <TokenType.UINT256: 111>, 'DEC': <TokenType.DECIMAL: 115>, 'DECIMAL': <TokenType.DECIMAL: 115>, 'DECIMAL32': <TokenType.DECIMAL32: 116>, 'DECIMAL64': <TokenType.DECIMAL64: 117>, 'DECIMAL128': <TokenType.DECIMAL128: 118>, 'DECIMAL256': <TokenType.DECIMAL256: 119>, 'DECFLOAT': <TokenType.DECFLOAT: 120>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 122>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 122>, 'BIGNUM': <TokenType.BIGNUM: 107>, 'LIST': <TokenType.LIST: 317>, 'MAP': <TokenType.MAP: 320>, 'NULLABLE': <TokenType.NULLABLE: 172>, 'NUMBER': <TokenType.DECIMAL: 115>, 'NUMERIC': <TokenType.DECIMAL: 115>, 'FIXED': <TokenType.DECIMAL: 115>, 'REAL': <TokenType.FLOAT: 112>, 'FLOAT': <TokenType.FLOAT: 112>, 'FLOAT4': <TokenType.FLOAT: 112>, 'FLOAT8': <TokenType.DOUBLE: 113>, 'DOUBLE': <TokenType.DOUBLE: 113>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 113>, 'JSON': <TokenType.JSON: 139>, 'JSONB': <TokenType.JSONB: 140>, 'CHAR': <TokenType.CHAR: 123>, 'CHARACTER': <TokenType.CHAR: 123>, 'CHAR VARYING': <TokenType.VARCHAR: 125>, 'CHARACTER VARYING': <TokenType.VARCHAR: 125>, 'NCHAR': <TokenType.NCHAR: 124>, 'VARCHAR': <TokenType.VARCHAR: 125>, 'VARCHAR2': <TokenType.VARCHAR: 125>, 'NVARCHAR': <TokenType.NVARCHAR: 126>, 'NVARCHAR2': <TokenType.NVARCHAR: 126>, 'BPCHAR': <TokenType.BPCHAR: 127>, 'STR': <TokenType.TEXT: 128>, 'STRING': <TokenType.TEXT: 128>, 'TEXT': <TokenType.TEXT: 128>, 'LONGTEXT': <TokenType.LONGTEXT: 130>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 129>, 'TINYTEXT': <TokenType.TINYTEXT: 135>, 'CLOB': <TokenType.TEXT: 128>, 'LONGVARCHAR': <TokenType.TEXT: 128>, 'BINARY': <TokenType.BINARY: 137>, 'BLOB': <TokenType.VARBINARY: 138>, 'LONGBLOB': <TokenType.LONGBLOB: 133>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 132>, 'TINYBLOB': <TokenType.TINYBLOB: 134>, 'BYTEA': <TokenType.VARBINARY: 138>, 'VARBINARY': <TokenType.VARBINARY: 138>, 'TIME': <TokenType.TIME: 141>, 'TIMETZ': 
<TokenType.TIMETZ: 142>, 'TIME_NS': <TokenType.TIME_NS: 143>, 'TIMESTAMP': <TokenType.TIMESTAMP: 144>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 145>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 146>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 146>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 147>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 147>, 'DATE': <TokenType.DATE: 155>, 'DATETIME': <TokenType.DATETIME: 151>, 'INT4RANGE': <TokenType.INT4RANGE: 157>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 158>, 'INT8RANGE': <TokenType.INT8RANGE: 159>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 160>, 'NUMRANGE': <TokenType.NUMRANGE: 161>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 162>, 'TSRANGE': <TokenType.TSRANGE: 163>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 164>, 'TSTZRANGE': <TokenType.TSTZRANGE: 165>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 166>, 'DATERANGE': <TokenType.DATERANGE: 167>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 168>, 'UNIQUE': <TokenType.UNIQUE: 426>, 'VECTOR': <TokenType.VECTOR: 213>, 'STRUCT': <TokenType.STRUCT: 399>, 'SEQUENCE': <TokenType.SEQUENCE: 386>, 'VARIANT': <TokenType.VARIANT: 196>, 'ALTER': <TokenType.ALTER: 217>, 'ANALYZE': <TokenType.ANALYZE: 435>, 'CALL': <TokenType.COMMAND: 235>, 'COMMENT': <TokenType.COMMENT: 236>, 'EXPLAIN': <TokenType.COMMAND: 235>, 'GRANT': <TokenType.GRANT: 286>, 'REVOKE': <TokenType.REVOKE: 373>, 'OPTIMIZE': <TokenType.COMMAND: 235>, 'PREPARE': <TokenType.COMMAND: 235>, 'VACUUM': <TokenType.COMMAND: 235>, 'USER-DEFINED': <TokenType.USERDEFINED: 191>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 430>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 431>}
BYTE_STRING_ESCAPES: ClassVar[list[str]] = ['\\']