Edit on GitHub

sqlglot.dialects.oracle

 1from __future__ import annotations
 2
 3import typing as t
 4
 5from sqlglot import exp, tokens
 6from sqlglot.dialects.dialect import (
 7    Dialect,
 8    NormalizationStrategy,
 9)
10from sqlglot.generators.oracle import OracleGenerator
11from sqlglot.parsers.oracle import OracleParser
12from sqlglot.tokens import TokenType
13
14
15class Oracle(Dialect):
16    ALIAS_POST_TABLESAMPLE = True
17    LOCKING_READS_SUPPORTED = True
18    TABLESAMPLE_SIZE_IS_PERCENT = True
19    NULL_ORDERING = "nulls_are_large"
20    ON_CONDITION_EMPTY_BEFORE_ERROR = False
21    ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False
22    DISABLES_ALIAS_REF_EXPANSION = True
23
24    # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm
25    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
26
27    # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
28    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
29    TIME_MAPPING = {
30        "D": "%u",  # Day of week (1-7)
31        "DAY": "%A",  # name of day
32        "DD": "%d",  # day of month (1-31)
33        "DDD": "%j",  # day of year (1-366)
34        "DY": "%a",  # abbreviated name of day
35        "HH": "%I",  # Hour of day (1-12)
36        "HH12": "%I",  # alias for HH
37        "HH24": "%H",  # Hour of day (0-23)
38        "IW": "%V",  # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
39        "MI": "%M",  # Minute (0-59)
40        "MM": "%m",  # Month (01-12; January = 01)
41        "MON": "%b",  # Abbreviated name of month
42        "MONTH": "%B",  # Name of month
43        "SS": "%S",  # Second (0-59)
44        "WW": "%W",  # Week of year (1-53)
45        "YY": "%y",  # 15
46        "YYYY": "%Y",  # 2015
47        "FF6": "%f",  # only 6 digits are supported in python formats
48    }
49
50    PSEUDOCOLUMNS = {"ROWNUM", "ROWID", "OBJECT_ID", "OBJECT_VALUE", "LEVEL"}
51
52    def can_quote(self, identifier: exp.Identifier, identify: str | bool = "safe") -> bool:
53        # Disable quoting for pseudocolumns as it may break queries e.g
54        # `WHERE "ROWNUM" = ...` does not work but `WHERE ROWNUM = ...` does
55        return (
56            identifier.quoted or not isinstance(identifier.parent, exp.Pseudocolumn)
57        ) and super().can_quote(identifier, identify=identify)
58
59    class Tokenizer(tokens.Tokenizer):
60        VAR_SINGLE_TOKENS = {"@", "$", "#"}
61
62        UNICODE_STRINGS = [
63            (prefix + q, q)
64            for q in t.cast(list[str], tokens.Tokenizer.QUOTES)
65            for prefix in ("U", "u")
66        ]
67
68        NESTED_COMMENTS = False
69
70        KEYWORDS = {
71            **tokens.Tokenizer.KEYWORDS,
72            "(+)": TokenType.JOIN_MARKER,
73            "BINARY_DOUBLE": TokenType.DOUBLE,
74            "BINARY_FLOAT": TokenType.FLOAT,
75            "BULK COLLECT INTO": TokenType.BULK_COLLECT_INTO,
76            "COLUMNS": TokenType.COLUMN,
77            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
78            "MINUS": TokenType.EXCEPT,
79            "NVARCHAR2": TokenType.NVARCHAR,
80            "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY,
81            "SAMPLE": TokenType.TABLE_SAMPLE,
82            "START": TokenType.BEGIN,
83            "TOP": TokenType.TOP,
84            "VARCHAR2": TokenType.VARCHAR,
85            "SYSTIMESTAMP": TokenType.SYSTIMESTAMP,
86        }
87
88    Parser = OracleParser
89
90    Generator = OracleGenerator
class Oracle(sqlglot.dialects.dialect.Dialect):
16class Oracle(Dialect):
17    ALIAS_POST_TABLESAMPLE = True
18    LOCKING_READS_SUPPORTED = True
19    TABLESAMPLE_SIZE_IS_PERCENT = True
20    NULL_ORDERING = "nulls_are_large"
21    ON_CONDITION_EMPTY_BEFORE_ERROR = False
22    ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False
23    DISABLES_ALIAS_REF_EXPANSION = True
24
25    # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm
26    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
27
28    # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
29    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
30    TIME_MAPPING = {
31        "D": "%u",  # Day of week (1-7)
32        "DAY": "%A",  # name of day
33        "DD": "%d",  # day of month (1-31)
34        "DDD": "%j",  # day of year (1-366)
35        "DY": "%a",  # abbreviated name of day
36        "HH": "%I",  # Hour of day (1-12)
37        "HH12": "%I",  # alias for HH
38        "HH24": "%H",  # Hour of day (0-23)
39        "IW": "%V",  # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
40        "MI": "%M",  # Minute (0-59)
41        "MM": "%m",  # Month (01-12; January = 01)
42        "MON": "%b",  # Abbreviated name of month
43        "MONTH": "%B",  # Name of month
44        "SS": "%S",  # Second (0-59)
45        "WW": "%W",  # Week of year (1-53)
46        "YY": "%y",  # 15
47        "YYYY": "%Y",  # 2015
48        "FF6": "%f",  # only 6 digits are supported in python formats
49    }
50
51    PSEUDOCOLUMNS = {"ROWNUM", "ROWID", "OBJECT_ID", "OBJECT_VALUE", "LEVEL"}
52
53    def can_quote(self, identifier: exp.Identifier, identify: str | bool = "safe") -> bool:
54        # Disable quoting for pseudocolumns as it may break queries e.g
55        # `WHERE "ROWNUM" = ...` does not work but `WHERE ROWNUM = ...` does
56        return (
57            identifier.quoted or not isinstance(identifier.parent, exp.Pseudocolumn)
58        ) and super().can_quote(identifier, identify=identify)
59
60    class Tokenizer(tokens.Tokenizer):
61        VAR_SINGLE_TOKENS = {"@", "$", "#"}
62
63        UNICODE_STRINGS = [
64            (prefix + q, q)
65            for q in t.cast(list[str], tokens.Tokenizer.QUOTES)
66            for prefix in ("U", "u")
67        ]
68
69        NESTED_COMMENTS = False
70
71        KEYWORDS = {
72            **tokens.Tokenizer.KEYWORDS,
73            "(+)": TokenType.JOIN_MARKER,
74            "BINARY_DOUBLE": TokenType.DOUBLE,
75            "BINARY_FLOAT": TokenType.FLOAT,
76            "BULK COLLECT INTO": TokenType.BULK_COLLECT_INTO,
77            "COLUMNS": TokenType.COLUMN,
78            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
79            "MINUS": TokenType.EXCEPT,
80            "NVARCHAR2": TokenType.NVARCHAR,
81            "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY,
82            "SAMPLE": TokenType.TABLE_SAMPLE,
83            "START": TokenType.BEGIN,
84            "TOP": TokenType.TOP,
85            "VARCHAR2": TokenType.VARCHAR,
86            "SYSTIMESTAMP": TokenType.SYSTIMESTAMP,
87        }
88
89    Parser = OracleParser
90
91    Generator = OracleGenerator
ALIAS_POST_TABLESAMPLE = True

Whether the table alias comes after tablesample.

LOCKING_READS_SUPPORTED = True
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

ON_CONDITION_EMPTY_BEFORE_ERROR = False

Whether "X ON EMPTY" should come before "X ON ERROR" (for dialects like T-SQL, MySQL, Oracle).

ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False
DISABLES_ALIAS_REF_EXPANSION = True

Whether alias reference expansion is disabled for this dialect.

Some dialects like Oracle do NOT support referencing aliases in projections or WHERE clauses. The original expression must be repeated instead.

For example, in Oracle:

    SELECT y.foo AS bar, bar * 2 AS baz FROM y    -- INVALID
    SELECT y.foo AS bar, y.foo * 2 AS baz FROM y  -- VALID

NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

TIME_MAPPING: dict[str, str] = {'D': '%u', 'DAY': '%A', 'DD': '%d', 'DDD': '%j', 'DY': '%a', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'IW': '%V', 'MI': '%M', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'SS': '%S', 'WW': '%W', 'YY': '%y', 'YYYY': '%Y', 'FF6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

PSEUDOCOLUMNS: set[str] = {'OBJECT_VALUE', 'OBJECT_ID', 'LEVEL', 'ROWID', 'ROWNUM'}

Columns that are auto-generated by the engine corresponding to this dialect. For example, such columns may be excluded from SELECT * queries.

def can_quote( self, identifier: sqlglot.expressions.core.Identifier, identify: str | bool = 'safe') -> bool:
53    def can_quote(self, identifier: exp.Identifier, identify: str | bool = "safe") -> bool:
54        # Disable quoting for pseudocolumns as it may break queries e.g
55        # `WHERE "ROWNUM" = ...` does not work but `WHERE ROWNUM = ...` does
56        return (
57            identifier.quoted or not isinstance(identifier.parent, exp.Pseudocolumn)
58        ) and super().can_quote(identifier, identify=identify)

Checks if an identifier can be quoted.

Arguments:
  • identifier: The identifier to check.
  • identify: True: Always returns True except for certain cases. "safe": Only returns True if the identifier is case-insensitive. "unsafe": Only returns True if the identifier is case-sensitive.
Returns:

Whether the given identifier can be quoted.

SUPPORTS_COLUMN_JOIN_MARKS = True

Whether the old-style outer join (+) syntax is supported.

STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = False

Whether string literals support escape sequences (e.g. \n). Set by the metaclass based on the tokenizer's STRING_ESCAPES.

BYTE_STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = False

Whether byte string literals support escape sequences. Set by the metaclass based on the tokenizer's BYTE_STRING_ESCAPES.

INITCAP_SUPPORTS_CUSTOM_DELIMITERS = False
tokenizer_class = <class 'Oracle.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.dialects.dialect.JSONPathTokenizer'>
parser_class = <class 'sqlglot.parsers.oracle.OracleParser'>
generator_class = <class 'sqlglot.generators.oracle.OracleGenerator'>
TIME_TRIE: dict = {'D': {0: True, 'A': {'Y': {0: True}}, 'D': {0: True, 'D': {0: True}}, 'Y': {0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'I': {'W': {0: True}}, 'M': {'I': {0: True}, 'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}}, 'S': {'S': {0: True}}, 'W': {'W': {0: True}}, 'Y': {'Y': {0: True, 'Y': {'Y': {0: True}}}}, 'F': {'F': {'6': {0: True}}}}
FORMAT_TRIE: dict = {'D': {0: True, 'A': {'Y': {0: True}}, 'D': {0: True, 'D': {0: True}}, 'Y': {0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'I': {'W': {0: True}}, 'M': {'I': {0: True}, 'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}}, 'S': {'S': {0: True}}, 'W': {'W': {0: True}}, 'Y': {'Y': {0: True, 'Y': {'Y': {0: True}}}}, 'F': {'F': {'6': {0: True}}}}
INVERSE_TIME_MAPPING: dict[str, str] = {'%u': 'D', '%A': 'DAY', '%d': 'DD', '%j': 'DDD', '%a': 'DY', '%I': 'HH12', '%H': 'HH24', '%V': 'IW', '%M': 'MI', '%m': 'MM', '%b': 'MON', '%B': 'MONTH', '%S': 'SS', '%W': 'WW', '%y': 'YY', '%Y': 'YYYY', '%f': 'FF6'}
INVERSE_TIME_TRIE: dict = {'%': {'u': {0: True}, 'A': {0: True}, 'd': {0: True}, 'j': {0: True}, 'a': {0: True}, 'I': {0: True}, 'H': {0: True}, 'V': {0: True}, 'M': {0: True}, 'm': {0: True}, 'b': {0: True}, 'B': {0: True}, 'S': {0: True}, 'W': {0: True}, 'y': {0: True}, 'Y': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: dict[str, str] = {}
INVERSE_FORMAT_TRIE: dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: dict[str, str] = {}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
VALID_INTERVAL_UNITS: set[str] = {'DAYOFYEAR', 'H', 'MICROSEC', 'NSECONDS', 'S', 'MONTHS', 'DEC', 'EPOCH_SECOND', 'WEEKOFYEAR', 'CENTS', 'DW', 'QUARTERS', 'WEEKDAY_ISO', 'DECADE', 'NANOSEC', 'CENTURY', 'EPOCH', 'MSECS', 'M', 'DAYOFWEEK', 'WEEKDAY', 'CENTURIES', 'MILS', 'MI', 'DAYOFWEEKISO', 'MONTH', 'HOURS', 'DOW_ISO', 'WEEK_ISO', 'SEC', 'NSECOND', 'DAY', 'HH', 'MSEC', 'EPOCH_MICROSECONDS', 'CENT', 'DAYOFWEEK_ISO', 'YYY', 'YEAR', 'USECOND', 'C', 'EPOCH_SECONDS', 'YR', 'MIN', 'NSEC', 'WY', 'USECS', 'WEEKOFYEAR_ISO', 'DAY OF YEAR', 'YEARS', 'NANOSECOND', 'QTR', 'DOW', 'WEEKISO', 'EPOCH_MICROSECOND', 'DY', 'MILLISEC', 'MM', 'W', 'WEEK', 'SECONDS', 'NANOSECS', 'MILLISECON', 'SECOND', 'USECONDS', 'D', 'US', 'EPOCH_NANOSECONDS', 'Y', 'NS', 'EPOCH_NANOSECOND', 'YYYY', 'DW_ISO', 'DAYOFMONTH', 'QUARTER', 'MON', 'Q', 'MIL', 'MONS', 'WOY', 'WEEKOFYEARISO', 'MINUTE', 'YY', 'USEC', 'YRS', 'MILLISECONDS', 'MILLENNIUM', 'TIMEZONE_MINUTE', 'HR', 'HOUR', 'MICROSECONDS', 'EPOCH_MILLISECONDS', 'MILLISECOND', 'MILLISECS', 'MINS', 'MSECOND', 'WK', 'MSECONDS', 'DD', 'TIMEZONE_HOUR', 'TZM', 'MICROSECOND', 'MINUTES', 'DECS', 'QTRS', 'MS', 'TZH', 'MICROSECS', 'EPOCH_MILLISECOND', 'DECADES', 'HRS', 'DAY OF WEEK', 'SECS', 'DAYS', 'DOY', 'MILLENIA'}
BIT_START: str | None = None
BIT_END: str | None = None
HEX_START: str | None = None
HEX_END: str | None = None
BYTE_START: str | None = None
BYTE_END: str | None = None
UNICODE_START: str | None = "U'"
UNICODE_END: str | None = "'"
class Oracle.Tokenizer(sqlglot.tokens.Tokenizer):
60    class Tokenizer(tokens.Tokenizer):
61        VAR_SINGLE_TOKENS = {"@", "$", "#"}
62
63        UNICODE_STRINGS = [
64            (prefix + q, q)
65            for q in t.cast(list[str], tokens.Tokenizer.QUOTES)
66            for prefix in ("U", "u")
67        ]
68
69        NESTED_COMMENTS = False
70
71        KEYWORDS = {
72            **tokens.Tokenizer.KEYWORDS,
73            "(+)": TokenType.JOIN_MARKER,
74            "BINARY_DOUBLE": TokenType.DOUBLE,
75            "BINARY_FLOAT": TokenType.FLOAT,
76            "BULK COLLECT INTO": TokenType.BULK_COLLECT_INTO,
77            "COLUMNS": TokenType.COLUMN,
78            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
79            "MINUS": TokenType.EXCEPT,
80            "NVARCHAR2": TokenType.NVARCHAR,
81            "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY,
82            "SAMPLE": TokenType.TABLE_SAMPLE,
83            "START": TokenType.BEGIN,
84            "TOP": TokenType.TOP,
85            "VARCHAR2": TokenType.VARCHAR,
86            "SYSTIMESTAMP": TokenType.SYSTIMESTAMP,
87        }
VAR_SINGLE_TOKENS = {'$', '#', '@'}
UNICODE_STRINGS = [("U'", "'"), ("u'", "'")]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 71>, '{%+': <TokenType.BLOCK_START: 71>, '{%-': <TokenType.BLOCK_START: 71>, '%}': <TokenType.BLOCK_END: 72>, '+%}': <TokenType.BLOCK_END: 72>, '-%}': <TokenType.BLOCK_END: 72>, '{{+': <TokenType.BLOCK_START: 71>, '{{-': <TokenType.BLOCK_START: 71>, '+}}': <TokenType.BLOCK_END: 72>, '-}}': <TokenType.BLOCK_END: 72>, '/*+': <TokenType.HINT: 291>, '&<': <TokenType.AMP_LT: 61>, '&>': <TokenType.AMP_GT: 62>, '==': <TokenType.EQ: 28>, '::': <TokenType.DCOLON: 14>, '?::': <TokenType.QDCOLON: 367>, '||': <TokenType.DPIPE: 37>, '|>': <TokenType.PIPE_GT: 38>, '>=': <TokenType.GTE: 26>, '<=': <TokenType.LTE: 24>, '<>': <TokenType.NEQ: 29>, '!=': <TokenType.NEQ: 29>, ':=': <TokenType.COLON_EQ: 31>, '<=>': <TokenType.NULLSAFE_EQ: 30>, '->': <TokenType.ARROW: 45>, '->>': <TokenType.DARROW: 46>, '=>': <TokenType.FARROW: 47>, '#>': <TokenType.HASH_ARROW: 49>, '#>>': <TokenType.DHASH_ARROW: 50>, '<->': <TokenType.LR_ARROW: 51>, '&&': <TokenType.DAMP: 60>, '??': <TokenType.DQMARK: 18>, '~~~': <TokenType.GLOB: 285>, '~~': <TokenType.LIKE: 316>, '~~*': <TokenType.ILIKE: 293>, '~*': <TokenType.IRLIKE: 305>, '-|-': <TokenType.ADJACENT: 63>, 'ALL': <TokenType.ALL: 218>, 'AND': <TokenType.AND: 34>, 'ANTI': <TokenType.ANTI: 219>, 'ANY': <TokenType.ANY: 220>, 'ASC': <TokenType.ASC: 223>, 'AS': <TokenType.ALIAS: 216>, 'ASOF': <TokenType.ASOF: 224>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 226>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 226>, 'BEGIN': <TokenType.BEGIN: 227>, 'BETWEEN': <TokenType.BETWEEN: 228>, 'CACHE': <TokenType.CACHE: 230>, 'UNCACHE': <TokenType.UNCACHE: 411>, 'CASE': <TokenType.CASE: 231>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 232>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 233>, 'COLLATE': <TokenType.COLLATE: 234>, 'COLUMN': <TokenType.COLUMN: 79>, 'COMMIT': <TokenType.COMMIT: 237>, 'CONNECT BY': <TokenType.CONNECT_BY: 238>, 'CONSTRAINT': <TokenType.CONSTRAINT: 239>, 'COPY': <TokenType.COPY: 240>, 'CREATE': 
<TokenType.CREATE: 241>, 'CROSS': <TokenType.CROSS: 242>, 'CUBE': <TokenType.CUBE: 243>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 244>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 246>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 247>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 248>, 'CURRENT_USER': <TokenType.CURRENT_USER: 249>, 'CURRENT_CATALOG': <TokenType.CURRENT_CATALOG: 252>, 'DATABASE': <TokenType.DATABASE: 78>, 'DEFAULT': <TokenType.DEFAULT: 254>, 'DELETE': <TokenType.DELETE: 255>, 'DESC': <TokenType.DESC: 256>, 'DESCRIBE': <TokenType.DESCRIBE: 257>, 'DISTINCT': <TokenType.DISTINCT: 260>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 261>, 'DIV': <TokenType.DIV: 262>, 'DROP': <TokenType.DROP: 263>, 'ELSE': <TokenType.ELSE: 264>, 'END': <TokenType.END: 265>, 'ENUM': <TokenType.ENUM: 203>, 'ESCAPE': <TokenType.ESCAPE: 266>, 'EXCEPT': <TokenType.EXCEPT: 267>, 'EXECUTE': <TokenType.EXECUTE: 268>, 'EXISTS': <TokenType.EXISTS: 269>, 'FALSE': <TokenType.FALSE: 270>, 'FETCH': <TokenType.FETCH: 271>, 'FILTER': <TokenType.FILTER: 274>, 'FILE': <TokenType.FILE: 272>, 'FIRST': <TokenType.FIRST: 276>, 'FULL': <TokenType.FULL: 282>, 'FUNCTION': <TokenType.FUNCTION: 283>, 'FOR': <TokenType.FOR: 277>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 279>, 'FORMAT': <TokenType.FORMAT: 280>, 'FROM': <TokenType.FROM: 281>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 170>, 'GEOMETRY': <TokenType.GEOMETRY: 173>, 'GLOB': <TokenType.GLOB: 285>, 'GROUP BY': <TokenType.GROUP_BY: 288>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 289>, 'HAVING': <TokenType.HAVING: 290>, 'ILIKE': <TokenType.ILIKE: 293>, 'IN': <TokenType.IN: 294>, 'INDEX': <TokenType.INDEX: 295>, 'INET': <TokenType.INET: 198>, 'INNER': <TokenType.INNER: 297>, 'INSERT': <TokenType.INSERT: 298>, 'INTERVAL': <TokenType.INTERVAL: 302>, 'INTERSECT': <TokenType.INTERSECT: 301>, 'INTO': <TokenType.INTO: 303>, 'IS': <TokenType.IS: 306>, 'ISNULL': <TokenType.ISNULL: 307>, 'JOIN': <TokenType.JOIN: 308>, 'KEEP': <TokenType.KEEP: 310>, 'KILL': 
<TokenType.KILL: 312>, 'LATERAL': <TokenType.LATERAL: 314>, 'LEFT': <TokenType.LEFT: 315>, 'LIKE': <TokenType.LIKE: 316>, 'LIMIT': <TokenType.LIMIT: 317>, 'LOAD': <TokenType.LOAD: 319>, 'LOCALTIME': <TokenType.LOCALTIME: 177>, 'LOCALTIMESTAMP': <TokenType.LOCALTIMESTAMP: 178>, 'LOCK': <TokenType.LOCK: 320>, 'MERGE': <TokenType.MERGE: 326>, 'NAMESPACE': <TokenType.NAMESPACE: 438>, 'NATURAL': <TokenType.NATURAL: 329>, 'NEXT': <TokenType.NEXT: 330>, 'NOT': <TokenType.NOT: 27>, 'NOTNULL': <TokenType.NOTNULL: 332>, 'NULL': <TokenType.NULL: 333>, 'OBJECT': <TokenType.OBJECT: 197>, 'OFFSET': <TokenType.OFFSET: 335>, 'ON': <TokenType.ON: 336>, 'OR': <TokenType.OR: 35>, 'XOR': <TokenType.XOR: 64>, 'ORDER BY': <TokenType.ORDER_BY: 339>, 'ORDINALITY': <TokenType.ORDINALITY: 342>, 'OUT': <TokenType.OUT: 343>, 'OUTER': <TokenType.OUTER: 345>, 'OVER': <TokenType.OVER: 346>, 'OVERLAPS': <TokenType.OVERLAPS: 347>, 'OVERWRITE': <TokenType.OVERWRITE: 348>, 'PARTITION': <TokenType.PARTITION: 350>, 'PARTITION BY': <TokenType.PARTITION_BY: 351>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 351>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 351>, 'PERCENT': <TokenType.PERCENT: 352>, 'PIVOT': <TokenType.PIVOT: 353>, 'PRAGMA': <TokenType.PRAGMA: 358>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 360>, 'PROCEDURE': <TokenType.PROCEDURE: 361>, 'OPERATOR': <TokenType.OPERATOR: 338>, 'QUALIFY': <TokenType.QUALIFY: 365>, 'RANGE': <TokenType.RANGE: 368>, 'RECURSIVE': <TokenType.RECURSIVE: 369>, 'REGEXP': <TokenType.RLIKE: 377>, 'RENAME': <TokenType.RENAME: 371>, 'REPLACE': <TokenType.REPLACE: 372>, 'RETURNING': <TokenType.RETURNING: 373>, 'REFERENCES': <TokenType.REFERENCES: 375>, 'RIGHT': <TokenType.RIGHT: 376>, 'RLIKE': <TokenType.RLIKE: 377>, 'ROLLBACK': <TokenType.ROLLBACK: 379>, 'ROLLUP': <TokenType.ROLLUP: 380>, 'ROW': <TokenType.ROW: 381>, 'ROWS': <TokenType.ROWS: 382>, 'SCHEMA': <TokenType.SCHEMA: 81>, 'SELECT': <TokenType.SELECT: 384>, 'SEMI': <TokenType.SEMI: 385>, 'SESSION': 
<TokenType.SESSION: 57>, 'SESSION_USER': <TokenType.SESSION_USER: 59>, 'SET': <TokenType.SET: 389>, 'SETTINGS': <TokenType.SETTINGS: 390>, 'SHOW': <TokenType.SHOW: 391>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 392>, 'SOME': <TokenType.SOME: 393>, 'SORT BY': <TokenType.SORT_BY: 394>, 'SQL SECURITY': <TokenType.SQL_SECURITY: 396>, 'START WITH': <TokenType.START_WITH: 397>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 399>, 'TABLE': <TokenType.TABLE: 82>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 402>, 'TEMP': <TokenType.TEMPORARY: 404>, 'TEMPORARY': <TokenType.TEMPORARY: 404>, 'THEN': <TokenType.THEN: 406>, 'TRUE': <TokenType.TRUE: 407>, 'TRUNCATE': <TokenType.TRUNCATE: 408>, 'TRIGGER': <TokenType.TRIGGER: 409>, 'UNION': <TokenType.UNION: 412>, 'UNKNOWN': <TokenType.UNKNOWN: 212>, 'UNNEST': <TokenType.UNNEST: 413>, 'UNPIVOT': <TokenType.UNPIVOT: 414>, 'UPDATE': <TokenType.UPDATE: 415>, 'USE': <TokenType.USE: 416>, 'USING': <TokenType.USING: 417>, 'UUID': <TokenType.UUID: 169>, 'VALUES': <TokenType.VALUES: 418>, 'VIEW': <TokenType.VIEW: 420>, 'VOLATILE': <TokenType.VOLATILE: 422>, 'WHEN': <TokenType.WHEN: 424>, 'WHERE': <TokenType.WHERE: 425>, 'WINDOW': <TokenType.WINDOW: 426>, 'WITH': <TokenType.WITH: 427>, 'APPLY': <TokenType.APPLY: 221>, 'ARRAY': <TokenType.ARRAY: 222>, 'BIT': <TokenType.BIT: 95>, 'BOOL': <TokenType.BOOLEAN: 96>, 'BOOLEAN': <TokenType.BOOLEAN: 96>, 'BYTE': <TokenType.TINYINT: 97>, 'MEDIUMINT': <TokenType.MEDIUMINT: 101>, 'INT1': <TokenType.TINYINT: 97>, 'TINYINT': <TokenType.TINYINT: 97>, 'INT16': <TokenType.SMALLINT: 99>, 'SHORT': <TokenType.SMALLINT: 99>, 'SMALLINT': <TokenType.SMALLINT: 99>, 'HUGEINT': <TokenType.INT128: 108>, 'UHUGEINT': <TokenType.UINT128: 109>, 'INT2': <TokenType.SMALLINT: 99>, 'INTEGER': <TokenType.INT: 103>, 'INT': <TokenType.INT: 103>, 'INT4': <TokenType.INT: 103>, 'INT32': <TokenType.INT: 103>, 'INT64': <TokenType.BIGINT: 105>, 'INT128': <TokenType.INT128: 108>, 'INT256': <TokenType.INT256: 110>, 'LONG': <TokenType.BIGINT: 105>, 
'BIGINT': <TokenType.BIGINT: 105>, 'INT8': <TokenType.TINYINT: 97>, 'UINT': <TokenType.UINT: 104>, 'UINT128': <TokenType.UINT128: 109>, 'UINT256': <TokenType.UINT256: 111>, 'DEC': <TokenType.DECIMAL: 115>, 'DECIMAL': <TokenType.DECIMAL: 115>, 'DECIMAL32': <TokenType.DECIMAL32: 116>, 'DECIMAL64': <TokenType.DECIMAL64: 117>, 'DECIMAL128': <TokenType.DECIMAL128: 118>, 'DECIMAL256': <TokenType.DECIMAL256: 119>, 'DECFLOAT': <TokenType.DECFLOAT: 120>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 122>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 122>, 'BIGNUM': <TokenType.BIGNUM: 107>, 'LIST': <TokenType.LIST: 318>, 'MAP': <TokenType.MAP: 321>, 'NULLABLE': <TokenType.NULLABLE: 172>, 'NUMBER': <TokenType.DECIMAL: 115>, 'NUMERIC': <TokenType.DECIMAL: 115>, 'FIXED': <TokenType.DECIMAL: 115>, 'REAL': <TokenType.FLOAT: 112>, 'FLOAT': <TokenType.FLOAT: 112>, 'FLOAT4': <TokenType.FLOAT: 112>, 'FLOAT8': <TokenType.DOUBLE: 113>, 'DOUBLE': <TokenType.DOUBLE: 113>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 113>, 'JSON': <TokenType.JSON: 139>, 'JSONB': <TokenType.JSONB: 140>, 'CHAR': <TokenType.CHAR: 123>, 'CHARACTER': <TokenType.CHAR: 123>, 'CHAR VARYING': <TokenType.VARCHAR: 125>, 'CHARACTER VARYING': <TokenType.VARCHAR: 125>, 'NCHAR': <TokenType.NCHAR: 124>, 'VARCHAR': <TokenType.VARCHAR: 125>, 'VARCHAR2': <TokenType.VARCHAR: 125>, 'NVARCHAR': <TokenType.NVARCHAR: 126>, 'NVARCHAR2': <TokenType.NVARCHAR: 126>, 'BPCHAR': <TokenType.BPCHAR: 127>, 'STR': <TokenType.TEXT: 128>, 'STRING': <TokenType.TEXT: 128>, 'TEXT': <TokenType.TEXT: 128>, 'LONGTEXT': <TokenType.LONGTEXT: 130>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 129>, 'TINYTEXT': <TokenType.TINYTEXT: 135>, 'CLOB': <TokenType.TEXT: 128>, 'LONGVARCHAR': <TokenType.TEXT: 128>, 'BINARY': <TokenType.BINARY: 137>, 'BLOB': <TokenType.VARBINARY: 138>, 'LONGBLOB': <TokenType.LONGBLOB: 133>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 132>, 'TINYBLOB': <TokenType.TINYBLOB: 134>, 'BYTEA': <TokenType.VARBINARY: 138>, 'VARBINARY': <TokenType.VARBINARY: 138>, 'TIME': 
<TokenType.TIME: 141>, 'TIMETZ': <TokenType.TIMETZ: 142>, 'TIME_NS': <TokenType.TIME_NS: 143>, 'TIMESTAMP': <TokenType.TIMESTAMP: 144>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 145>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 146>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 146>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 147>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 147>, 'DATE': <TokenType.DATE: 155>, 'DATETIME': <TokenType.DATETIME: 151>, 'INT4RANGE': <TokenType.INT4RANGE: 157>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 158>, 'INT8RANGE': <TokenType.INT8RANGE: 159>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 160>, 'NUMRANGE': <TokenType.NUMRANGE: 161>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 162>, 'TSRANGE': <TokenType.TSRANGE: 163>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 164>, 'TSTZRANGE': <TokenType.TSTZRANGE: 165>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 166>, 'DATERANGE': <TokenType.DATERANGE: 167>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 168>, 'UNIQUE': <TokenType.UNIQUE: 428>, 'VECTOR': <TokenType.VECTOR: 213>, 'STRUCT': <TokenType.STRUCT: 400>, 'SEQUENCE': <TokenType.SEQUENCE: 387>, 'VARIANT': <TokenType.VARIANT: 196>, 'ALTER': <TokenType.ALTER: 217>, 'ANALYZE': <TokenType.ANALYZE: 437>, 'CALL': <TokenType.COMMAND: 235>, 'COMMENT': <TokenType.COMMENT: 236>, 'EXPLAIN': <TokenType.COMMAND: 235>, 'GRANT': <TokenType.GRANT: 287>, 'REVOKE': <TokenType.REVOKE: 374>, 'OPTIMIZE': <TokenType.COMMAND: 235>, 'PREPARE': <TokenType.COMMAND: 235>, 'VACUUM': <TokenType.COMMAND: 235>, 'USER-DEFINED': <TokenType.USERDEFINED: 191>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 432>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 433>, '(+)': <TokenType.JOIN_MARKER: 309>, 'BINARY_DOUBLE': <TokenType.DOUBLE: 113>, 'BINARY_FLOAT': <TokenType.FLOAT: 112>, 'BULK COLLECT INTO': <TokenType.BULK_COLLECT_INTO: 229>, 'COLUMNS': <TokenType.COLUMN: 79>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 324>, 'MINUS': <TokenType.EXCEPT: 267>, 'ORDER SIBLINGS BY': 
<TokenType.ORDER_SIBLINGS_BY: 340>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 402>, 'START': <TokenType.BEGIN: 227>, 'TOP': <TokenType.TOP: 405>, 'SYSTIMESTAMP': <TokenType.SYSTIMESTAMP: 179>}
BYTE_STRING_ESCAPES: ClassVar[list[str]] = ["'"]