
sqlglot.tokens
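
This module implements the SQL tokenizer: it converts a raw SQL string into a
list of Token objects that the parser consumes. A minimal usage sketch
(illustrative, not part of the module source):

    from sqlglot.tokens import Tokenizer, TokenType

    tokens = Tokenizer().tokenize("SELECT a FROM b")

    assert tokens[0].token_type == TokenType.SELECT
    assert [t.text for t in tokens] == ["SELECT", "a", "FROM", "b"]

When the optional sqlglotrs package is importable, tokenization is delegated to
its Rust implementation by default; set the environment variable
SQLGLOTRS_TOKENIZER=0, or pass use_rs_tokenizer=False, to force the pure-Python
tokenizer.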

   1from __future__ import annotations
   2
   3import os
   4import typing as t
   5from enum import auto
   6
   7from sqlglot.errors import SqlglotError, TokenError
   8from sqlglot.helper import AutoName
   9from sqlglot.trie import TrieResult, in_trie, new_trie
  10
  11if t.TYPE_CHECKING:
  12    from sqlglot.dialects.dialect import DialectType
  13
  14
  15try:
  16    from sqlglotrs import (  # type: ignore
  17        Tokenizer as RsTokenizer,
  18        TokenizerDialectSettings as RsTokenizerDialectSettings,
  19        TokenizerSettings as RsTokenizerSettings,
  20        TokenTypeSettings as RsTokenTypeSettings,
  21    )
  22
  23    USE_RS_TOKENIZER = os.environ.get("SQLGLOTRS_TOKENIZER", "1") == "1"
  24except ImportError:
  25    USE_RS_TOKENIZER = False
  26
  27
  28class TokenType(AutoName):
  29    L_PAREN = auto()
  30    R_PAREN = auto()
  31    L_BRACKET = auto()
  32    R_BRACKET = auto()
  33    L_BRACE = auto()
  34    R_BRACE = auto()
  35    COMMA = auto()
  36    DOT = auto()
  37    DASH = auto()
  38    PLUS = auto()
  39    COLON = auto()
  40    DOTCOLON = auto()
  41    DCOLON = auto()
  42    DCOLONDOLLAR = auto()
  43    DCOLONPERCENT = auto()
  44    DCOLONQMARK = auto()
  45    DQMARK = auto()
  46    SEMICOLON = auto()
  47    STAR = auto()
  48    BACKSLASH = auto()
  49    SLASH = auto()
  50    LT = auto()
  51    LTE = auto()
  52    GT = auto()
  53    GTE = auto()
  54    NOT = auto()
  55    EQ = auto()
  56    NEQ = auto()
  57    NULLSAFE_EQ = auto()
  58    COLON_EQ = auto()
  59    COLON_GT = auto()
  60    NCOLON_GT = auto()
  61    AND = auto()
  62    OR = auto()
  63    AMP = auto()
  64    DPIPE = auto()
  65    PIPE_GT = auto()
  66    PIPE = auto()
  67    PIPE_SLASH = auto()
  68    DPIPE_SLASH = auto()
  69    CARET = auto()
  70    CARET_AT = auto()
  71    TILDE = auto()
  72    ARROW = auto()
  73    DARROW = auto()
  74    FARROW = auto()
  75    HASH = auto()
  76    HASH_ARROW = auto()
  77    DHASH_ARROW = auto()
  78    LR_ARROW = auto()
  79    DAT = auto()
  80    LT_AT = auto()
  81    AT_GT = auto()
  82    DOLLAR = auto()
  83    PARAMETER = auto()
  84    SESSION = auto()
  85    SESSION_PARAMETER = auto()
  86    SESSION_USER = auto()
  87    DAMP = auto()
  88    AMP_LT = auto()
  89    AMP_GT = auto()
  90    ADJACENT = auto()
  91    XOR = auto()
  92    DSTAR = auto()
  93    QMARK_AMP = auto()
  94    QMARK_PIPE = auto()
  95    HASH_DASH = auto()
  96    EXCLAMATION = auto()
  97
  98    URI_START = auto()
  99
 100    BLOCK_START = auto()
 101    BLOCK_END = auto()
 102
 103    SPACE = auto()
 104    BREAK = auto()
 105
 106    STRING = auto()
 107    NUMBER = auto()
 108    IDENTIFIER = auto()
 109    DATABASE = auto()
 110    COLUMN = auto()
 111    COLUMN_DEF = auto()
 112    SCHEMA = auto()
 113    TABLE = auto()
 114    WAREHOUSE = auto()
 115    STAGE = auto()
 116    STREAMLIT = auto()
 117    VAR = auto()
 118    BIT_STRING = auto()
 119    HEX_STRING = auto()
 120    BYTE_STRING = auto()
 121    NATIONAL_STRING = auto()
 122    RAW_STRING = auto()
 123    HEREDOC_STRING = auto()
 124    UNICODE_STRING = auto()
 125
 126    # types
 127    BIT = auto()
 128    BOOLEAN = auto()
 129    TINYINT = auto()
 130    UTINYINT = auto()
 131    SMALLINT = auto()
 132    USMALLINT = auto()
 133    MEDIUMINT = auto()
 134    UMEDIUMINT = auto()
 135    INT = auto()
 136    UINT = auto()
 137    BIGINT = auto()
 138    UBIGINT = auto()
 139    BIGNUM = auto()  # unlimited precision int
 140    INT128 = auto()
 141    UINT128 = auto()
 142    INT256 = auto()
 143    UINT256 = auto()
 144    FLOAT = auto()
 145    DOUBLE = auto()
 146    UDOUBLE = auto()
 147    DECIMAL = auto()
 148    DECIMAL32 = auto()
 149    DECIMAL64 = auto()
 150    DECIMAL128 = auto()
 151    DECIMAL256 = auto()
 152    DECFLOAT = auto()
 153    UDECIMAL = auto()
 154    BIGDECIMAL = auto()
 155    CHAR = auto()
 156    NCHAR = auto()
 157    VARCHAR = auto()
 158    NVARCHAR = auto()
 159    BPCHAR = auto()
 160    TEXT = auto()
 161    MEDIUMTEXT = auto()
 162    LONGTEXT = auto()
 163    BLOB = auto()
 164    MEDIUMBLOB = auto()
 165    LONGBLOB = auto()
 166    TINYBLOB = auto()
 167    TINYTEXT = auto()
 168    NAME = auto()
 169    BINARY = auto()
 170    VARBINARY = auto()
 171    JSON = auto()
 172    JSONB = auto()
 173    TIME = auto()
 174    TIMETZ = auto()
 175    TIME_NS = auto()
 176    TIMESTAMP = auto()
 177    TIMESTAMPTZ = auto()
 178    TIMESTAMPLTZ = auto()
 179    TIMESTAMPNTZ = auto()
 180    TIMESTAMP_S = auto()
 181    TIMESTAMP_MS = auto()
 182    TIMESTAMP_NS = auto()
 183    DATETIME = auto()
 184    DATETIME2 = auto()
 185    DATETIME64 = auto()
 186    SMALLDATETIME = auto()
 187    DATE = auto()
 188    DATE32 = auto()
 189    INT4RANGE = auto()
 190    INT4MULTIRANGE = auto()
 191    INT8RANGE = auto()
 192    INT8MULTIRANGE = auto()
 193    NUMRANGE = auto()
 194    NUMMULTIRANGE = auto()
 195    TSRANGE = auto()
 196    TSMULTIRANGE = auto()
 197    TSTZRANGE = auto()
 198    TSTZMULTIRANGE = auto()
 199    DATERANGE = auto()
 200    DATEMULTIRANGE = auto()
 201    UUID = auto()
 202    GEOGRAPHY = auto()
 203    GEOGRAPHYPOINT = auto()
 204    NULLABLE = auto()
 205    GEOMETRY = auto()
 206    POINT = auto()
 207    RING = auto()
 208    LINESTRING = auto()
 209    LOCALTIME = auto()
 210    LOCALTIMESTAMP = auto()
 211    SYSTIMESTAMP = auto()
 212    MULTILINESTRING = auto()
 213    POLYGON = auto()
 214    MULTIPOLYGON = auto()
 215    HLLSKETCH = auto()
 216    HSTORE = auto()
 217    SUPER = auto()
 218    SERIAL = auto()
 219    SMALLSERIAL = auto()
 220    BIGSERIAL = auto()
 221    XML = auto()
 222    YEAR = auto()
 223    USERDEFINED = auto()
 224    MONEY = auto()
 225    SMALLMONEY = auto()
 226    ROWVERSION = auto()
 227    IMAGE = auto()
 228    VARIANT = auto()
 229    OBJECT = auto()
 230    INET = auto()
 231    IPADDRESS = auto()
 232    IPPREFIX = auto()
 233    IPV4 = auto()
 234    IPV6 = auto()
 235    ENUM = auto()
 236    ENUM8 = auto()
 237    ENUM16 = auto()
 238    FIXEDSTRING = auto()
 239    LOWCARDINALITY = auto()
 240    NESTED = auto()
 241    AGGREGATEFUNCTION = auto()
 242    SIMPLEAGGREGATEFUNCTION = auto()
 243    TDIGEST = auto()
 244    UNKNOWN = auto()
 245    VECTOR = auto()
 246    DYNAMIC = auto()
 247    VOID = auto()
 248
 249    # keywords
 250    ALIAS = auto()
 251    ALTER = auto()
 252    ALL = auto()
 253    ANTI = auto()
 254    ANY = auto()
 255    APPLY = auto()
 256    ARRAY = auto()
 257    ASC = auto()
 258    ASOF = auto()
 259    ATTACH = auto()
 260    AUTO_INCREMENT = auto()
 261    BEGIN = auto()
 262    BETWEEN = auto()
 263    BULK_COLLECT_INTO = auto()
 264    CACHE = auto()
 265    CASE = auto()
 266    CHARACTER_SET = auto()
 267    CLUSTER_BY = auto()
 268    COLLATE = auto()
 269    COMMAND = auto()
 270    COMMENT = auto()
 271    COMMIT = auto()
 272    CONNECT_BY = auto()
 273    CONSTRAINT = auto()
 274    COPY = auto()
 275    CREATE = auto()
 276    CROSS = auto()
 277    CUBE = auto()
 278    CURRENT_DATE = auto()
 279    CURRENT_DATETIME = auto()
 280    CURRENT_SCHEMA = auto()
 281    CURRENT_TIME = auto()
 282    CURRENT_TIMESTAMP = auto()
 283    CURRENT_USER = auto()
 284    CURRENT_ROLE = auto()
 285    CURRENT_CATALOG = auto()
 286    DECLARE = auto()
 287    DEFAULT = auto()
 288    DELETE = auto()
 289    DESC = auto()
 290    DESCRIBE = auto()
 291    DETACH = auto()
 292    DICTIONARY = auto()
 293    DISTINCT = auto()
 294    DISTRIBUTE_BY = auto()
 295    DIV = auto()
 296    DROP = auto()
 297    ELSE = auto()
 298    END = auto()
 299    ESCAPE = auto()
 300    EXCEPT = auto()
 301    EXECUTE = auto()
 302    EXISTS = auto()
 303    FALSE = auto()
 304    FETCH = auto()
 305    FILE = auto()
 306    FILE_FORMAT = auto()
 307    FILTER = auto()
 308    FINAL = auto()
 309    FIRST = auto()
 310    FOR = auto()
 311    FORCE = auto()
 312    FOREIGN_KEY = auto()
 313    FORMAT = auto()
 314    FROM = auto()
 315    FULL = auto()
 316    FUNCTION = auto()
 317    GET = auto()
 318    GLOB = auto()
 319    GLOBAL = auto()
 320    GRANT = auto()
 321    GROUP_BY = auto()
 322    GROUPING_SETS = auto()
 323    HAVING = auto()
 324    HINT = auto()
 325    IGNORE = auto()
 326    ILIKE = auto()
 327    IN = auto()
 328    INDEX = auto()
 329    INDEXED_BY = auto()
 330    INNER = auto()
 331    INSERT = auto()
 332    INSTALL = auto()
 333    INTERSECT = auto()
 334    INTERVAL = auto()
 335    INTO = auto()
 336    INTRODUCER = auto()
 337    IRLIKE = auto()
 338    IS = auto()
 339    ISNULL = auto()
 340    JOIN = auto()
 341    JOIN_MARKER = auto()
 342    KEEP = auto()
 343    KEY = auto()
 344    KILL = auto()
 345    LANGUAGE = auto()
 346    LATERAL = auto()
 347    LEFT = auto()
 348    LIKE = auto()
 349    LIMIT = auto()
 350    LIST = auto()
 351    LOAD = auto()
 352    LOCK = auto()
 353    MAP = auto()
 354    MATCH = auto()
 355    MATCH_CONDITION = auto()
 356    MATCH_RECOGNIZE = auto()
 357    MEMBER_OF = auto()
 358    MERGE = auto()
 359    MOD = auto()
 360    MODEL = auto()
 361    NATURAL = auto()
 362    NEXT = auto()
 363    NOTHING = auto()
 364    NOTNULL = auto()
 365    NULL = auto()
 366    OBJECT_IDENTIFIER = auto()
 367    OFFSET = auto()
 368    ON = auto()
 369    ONLY = auto()
 370    OPERATOR = auto()
 371    ORDER_BY = auto()
 372    ORDER_SIBLINGS_BY = auto()
 373    ORDERED = auto()
 374    ORDINALITY = auto()
 375    OUT = auto()
 376    INOUT = auto()
 377    OUTER = auto()
 378    OVER = auto()
 379    OVERLAPS = auto()
 380    OVERWRITE = auto()
 381    PARTITION = auto()
 382    PARTITION_BY = auto()
 383    PERCENT = auto()
 384    PIVOT = auto()
 385    PLACEHOLDER = auto()
 386    POSITIONAL = auto()
 387    PRAGMA = auto()
 388    PREWHERE = auto()
 389    PRIMARY_KEY = auto()
 390    PROCEDURE = auto()
 391    PROPERTIES = auto()
 392    PSEUDO_TYPE = auto()
 393    PUT = auto()
 394    QUALIFY = auto()
 395    QUOTE = auto()
 396    QDCOLON = auto()
 397    RANGE = auto()
 398    RECURSIVE = auto()
 399    REFRESH = auto()
 400    RENAME = auto()
 401    REPLACE = auto()
 402    RETURNING = auto()
 403    REVOKE = auto()
 404    REFERENCES = auto()
 405    RIGHT = auto()
 406    RLIKE = auto()
 407    ROLLBACK = auto()
 408    ROLLUP = auto()
 409    ROW = auto()
 410    ROWS = auto()
 411    SELECT = auto()
 412    SEMI = auto()
 413    SEPARATOR = auto()
 414    SEQUENCE = auto()
 415    SERDE_PROPERTIES = auto()
 416    SET = auto()
 417    SETTINGS = auto()
 418    SHOW = auto()
 419    SIMILAR_TO = auto()
 420    SOME = auto()
 421    SORT_BY = auto()
 422    SOUNDS_LIKE = auto()
 423    START_WITH = auto()
 424    STORAGE_INTEGRATION = auto()
 425    STRAIGHT_JOIN = auto()
 426    STRUCT = auto()
 427    SUMMARIZE = auto()
 428    TABLE_SAMPLE = auto()
 429    TAG = auto()
 430    TEMPORARY = auto()
 431    TOP = auto()
 432    THEN = auto()
 433    TRUE = auto()
 434    TRUNCATE = auto()
 435    UNCACHE = auto()
 436    UNION = auto()
 437    UNNEST = auto()
 438    UNPIVOT = auto()
 439    UPDATE = auto()
 440    USE = auto()
 441    USING = auto()
 442    VALUES = auto()
 443    VARIADIC = auto()
 444    VIEW = auto()
 445    SEMANTIC_VIEW = auto()
 446    VOLATILE = auto()
 447    WHEN = auto()
 448    WHERE = auto()
 449    WINDOW = auto()
 450    WITH = auto()
 451    UNIQUE = auto()
 452    UTC_DATE = auto()
 453    UTC_TIME = auto()
 454    UTC_TIMESTAMP = auto()
 455    VERSION_SNAPSHOT = auto()
 456    TIMESTAMP_SNAPSHOT = auto()
 457    OPTION = auto()
 458    SINK = auto()
 459    SOURCE = auto()
 460    ANALYZE = auto()
 461    NAMESPACE = auto()
 462    EXPORT = auto()
 463
 464    # sentinel
 465    HIVE_TOKEN_STREAM = auto()
 466
 467
 468_ALL_TOKEN_TYPES = list(TokenType)
 469_TOKEN_TYPE_TO_INDEX = {token_type: i for i, token_type in enumerate(_ALL_TOKEN_TYPES)}
 470
 471
 472class Token:
 473    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
 474
 475    @classmethod
 476    def number(cls, number: int) -> Token:
 477        """Returns a NUMBER token with `number` as its text."""
 478        return cls(TokenType.NUMBER, str(number))
 479
 480    @classmethod
 481    def string(cls, string: str) -> Token:
 482        """Returns a STRING token with `string` as its text."""
 483        return cls(TokenType.STRING, string)
 484
 485    @classmethod
 486    def identifier(cls, identifier: str) -> Token:
 487        """Returns an IDENTIFIER token with `identifier` as its text."""
 488        return cls(TokenType.IDENTIFIER, identifier)
 489
 490    @classmethod
 491    def var(cls, var: str) -> Token:
 492        """Returns a VAR token with `var` as its text."""
 493        return cls(TokenType.VAR, var)
 494
 495    def __init__(
 496        self,
 497        token_type: TokenType,
 498        text: str,
 499        line: int = 1,
 500        col: int = 1,
 501        start: int = 0,
 502        end: int = 0,
 503        comments: t.Optional[t.List[str]] = None,
 504    ) -> None:
 505        """Token initializer.
 506
 507        Args:
 508            token_type: The TokenType Enum.
 509            text: The text of the token.
 510            line: The line that the token ends on.
 511            col: The column that the token ends on.
 512            start: The start index of the token.
 513            end: The ending index of the token.
 514            comments: The comments to attach to the token.
 515        """
 516        self.token_type = token_type
 517        self.text = text
 518        self.line = line
 519        self.col = col
 520        self.start = start
 521        self.end = end
 522        self.comments = [] if comments is None else comments
 523
 524    def __repr__(self) -> str:
 525        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
 526        return f"<Token {attributes}>"
 527
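# Illustrative (not part of the module source): the classmethods above build
# standalone tokens with default position info, e.g.
#
#     Token.number(42)         # <Token token_type: TokenType.NUMBER, text: 42, ...>
#     Token.identifier("col")  # <Token token_type: TokenType.IDENTIFIER, text: col, ...>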
 528
 529class _Tokenizer(type):
 530    def __new__(cls, clsname, bases, attrs):
 531        klass = super().__new__(cls, clsname, bases, attrs)
 532
 533        def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
 534            return dict(
 535                (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr
 536            )
 537
 538        def _quotes_to_format(
 539            token_type: TokenType, arr: t.List[str | t.Tuple[str, str]]
 540        ) -> t.Dict[str, t.Tuple[str, TokenType]]:
 541            return {k: (v, token_type) for k, v in _convert_quotes(arr).items()}
 542
 543        klass._QUOTES = _convert_quotes(klass.QUOTES)
 544        klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS)
 545
 546        klass._FORMAT_STRINGS = {
 547            **{
 548                p + s: (e, TokenType.NATIONAL_STRING)
 549                for s, e in klass._QUOTES.items()
 550                for p in ("n", "N")
 551            },
 552            **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS),
 553            **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS),
 554            **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
 555            **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
 556            **_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS),
 557            **_quotes_to_format(TokenType.UNICODE_STRING, klass.UNICODE_STRINGS),
 558        }
 559
 560        if "BYTE_STRING_ESCAPES" not in klass.__dict__:
 561            klass.BYTE_STRING_ESCAPES = klass.STRING_ESCAPES.copy()
 562
 563        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
 564        klass._BYTE_STRING_ESCAPES = set(klass.BYTE_STRING_ESCAPES)
 565        klass._ESCAPE_FOLLOW_CHARS = set(klass.ESCAPE_FOLLOW_CHARS)
 566        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
 567        klass._COMMENTS = {
 568            **dict(
 569                (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
 570                for comment in klass.COMMENTS
 571            ),
 572            "{#": "#}",  # Ensure Jinja comments are tokenized correctly in all dialects
 573        }
 574        if klass.HINT_START in klass.KEYWORDS:
 575            klass._COMMENTS[klass.HINT_START] = "*/"
 576
 577        klass._KEYWORD_TRIE = new_trie(
 578            key.upper()
 579            for key in (
 580                *klass.KEYWORDS,
 581                *klass._COMMENTS,
 582                *klass._QUOTES,
 583                *klass._FORMAT_STRINGS,
 584            )
 585            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
 586        )
 587
 588        if USE_RS_TOKENIZER:
 589            settings = RsTokenizerSettings(
 590                white_space={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.WHITE_SPACE.items()},
 591                single_tokens={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.SINGLE_TOKENS.items()},
 592                keywords={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.KEYWORDS.items()},
 593                numeric_literals=klass.NUMERIC_LITERALS,
 594                identifiers=klass._IDENTIFIERS,
 595                identifier_escapes=klass._IDENTIFIER_ESCAPES,
 596                string_escapes=klass._STRING_ESCAPES,
 597                byte_string_escapes=klass._BYTE_STRING_ESCAPES,
 598                quotes=klass._QUOTES,
 599                format_strings={
 600                    k: (v1, _TOKEN_TYPE_TO_INDEX[v2])
 601                    for k, (v1, v2) in klass._FORMAT_STRINGS.items()
 602                },
 603                has_bit_strings=bool(klass.BIT_STRINGS),
 604                has_hex_strings=bool(klass.HEX_STRINGS),
 605                comments=klass._COMMENTS,
 606                var_single_tokens=klass.VAR_SINGLE_TOKENS,
 607                commands={_TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMANDS},
 608                command_prefix_tokens={
 609                    _TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMAND_PREFIX_TOKENS
 610                },
 611                heredoc_tag_is_identifier=klass.HEREDOC_TAG_IS_IDENTIFIER,
 612                string_escapes_allowed_in_raw_strings=klass.STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS,
 613                nested_comments=klass.NESTED_COMMENTS,
 614                hint_start=klass.HINT_START,
 615                tokens_preceding_hint={
 616                    _TOKEN_TYPE_TO_INDEX[v] for v in klass.TOKENS_PRECEDING_HINT
 617                },
 618                escape_follow_chars=klass._ESCAPE_FOLLOW_CHARS,
 619            )
 620            token_types = RsTokenTypeSettings(
 621                bit_string=_TOKEN_TYPE_TO_INDEX[TokenType.BIT_STRING],
 622                byte_string=_TOKEN_TYPE_TO_INDEX[TokenType.BYTE_STRING],
 623                break_=_TOKEN_TYPE_TO_INDEX[TokenType.BREAK],
 624                dcolon=_TOKEN_TYPE_TO_INDEX[TokenType.DCOLON],
 625                heredoc_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEREDOC_STRING],
 626                raw_string=_TOKEN_TYPE_TO_INDEX[TokenType.RAW_STRING],
 627                hex_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEX_STRING],
 628                identifier=_TOKEN_TYPE_TO_INDEX[TokenType.IDENTIFIER],
 629                number=_TOKEN_TYPE_TO_INDEX[TokenType.NUMBER],
 630                parameter=_TOKEN_TYPE_TO_INDEX[TokenType.PARAMETER],
 631                semicolon=_TOKEN_TYPE_TO_INDEX[TokenType.SEMICOLON],
 632                string=_TOKEN_TYPE_TO_INDEX[TokenType.STRING],
 633                var=_TOKEN_TYPE_TO_INDEX[TokenType.VAR],
 634                heredoc_string_alternative=_TOKEN_TYPE_TO_INDEX[klass.HEREDOC_STRING_ALTERNATIVE],
 635                hint=_TOKEN_TYPE_TO_INDEX[TokenType.HINT],
 636            )
 637            klass._RS_TOKENIZER = RsTokenizer(settings, token_types)
 638        else:
 639            klass._RS_TOKENIZER = None
 640
 641        return klass
 642
 643
 644class Tokenizer(metaclass=_Tokenizer):
 645    SINGLE_TOKENS = {
 646        "(": TokenType.L_PAREN,
 647        ")": TokenType.R_PAREN,
 648        "[": TokenType.L_BRACKET,
 649        "]": TokenType.R_BRACKET,
 650        "{": TokenType.L_BRACE,
 651        "}": TokenType.R_BRACE,
 652        "&": TokenType.AMP,
 653        "^": TokenType.CARET,
 654        ":": TokenType.COLON,
 655        ",": TokenType.COMMA,
 656        ".": TokenType.DOT,
 657        "-": TokenType.DASH,
 658        "=": TokenType.EQ,
 659        ">": TokenType.GT,
 660        "<": TokenType.LT,
 661        "%": TokenType.MOD,
 662        "!": TokenType.NOT,
 663        "|": TokenType.PIPE,
 664        "+": TokenType.PLUS,
 665        ";": TokenType.SEMICOLON,
 666        "/": TokenType.SLASH,
 667        "\\": TokenType.BACKSLASH,
 668        "*": TokenType.STAR,
 669        "~": TokenType.TILDE,
 670        "?": TokenType.PLACEHOLDER,
 671        "@": TokenType.PARAMETER,
 672        "#": TokenType.HASH,
 673        # Used for breaking a var like x'y', but nothing else; the token type doesn't matter
 674        "'": TokenType.UNKNOWN,
 675        "`": TokenType.UNKNOWN,
 676        '"': TokenType.UNKNOWN,
 677    }
 678
 679    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 680    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 681    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 682    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
 683    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
 684    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 685    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 686    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 687    STRING_ESCAPES = ["'"]
 688    BYTE_STRING_ESCAPES: t.List[str] = []
 689    VAR_SINGLE_TOKENS: t.Set[str] = set()
 690    ESCAPE_FOLLOW_CHARS: t.List[str] = []
 691
 692    # The strings in this list can always be used as escapes, regardless of the surrounding
 693    # identifier delimiters. By default, the closing delimiter is assumed to also act as an
 694    # identifier escape, e.g. if we use double-quotes, then they also act as escapes: "x"""
 695    IDENTIFIER_ESCAPES: t.List[str] = []
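    # Illustrative (not part of the source): with the defaults above, doubling the
    # closing delimiter escapes it inside an identifier, so "x""y" yields the text x"y:
    #
    #     Tokenizer().tokenize('SELECT "x""y"')[1].text  # 'x"y'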
 696
 697    # Whether the heredoc tags follow the same lexical rules as unquoted identifiers
 698    HEREDOC_TAG_IS_IDENTIFIER = False
 699
 700    # The token we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc
 701    HEREDOC_STRING_ALTERNATIVE = TokenType.VAR
 702
 703    # Whether string escape characters function as such when placed within raw strings
 704    STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS = True
 705
 706    NESTED_COMMENTS = True
 707
 708    HINT_START = "/*+"
 709
 710    TOKENS_PRECEDING_HINT = {TokenType.SELECT, TokenType.INSERT, TokenType.UPDATE, TokenType.DELETE}
 711
 712    # Autofilled
 713    _COMMENTS: t.Dict[str, str] = {}
 714    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
 715    _IDENTIFIERS: t.Dict[str, str] = {}
 716    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 717    _QUOTES: t.Dict[str, str] = {}
 718    _STRING_ESCAPES: t.Set[str] = set()
 719    _BYTE_STRING_ESCAPES: t.Set[str] = set()
 720    _KEYWORD_TRIE: t.Dict = {}
 721    _RS_TOKENIZER: t.Optional[t.Any] = None
 722    _ESCAPE_FOLLOW_CHARS: t.Set[str] = set()
 723
 724    KEYWORDS: t.Dict[str, TokenType] = {
 725        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 726        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 727        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
 728        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
 729        HINT_START: TokenType.HINT,
 730        "&<": TokenType.AMP_LT,
 731        "&>": TokenType.AMP_GT,
 732        "==": TokenType.EQ,
 733        "::": TokenType.DCOLON,
 734        "?::": TokenType.QDCOLON,
 735        "||": TokenType.DPIPE,
 736        "|>": TokenType.PIPE_GT,
 737        ">=": TokenType.GTE,
 738        "<=": TokenType.LTE,
 739        "<>": TokenType.NEQ,
 740        "!=": TokenType.NEQ,
 741        ":=": TokenType.COLON_EQ,
 742        "<=>": TokenType.NULLSAFE_EQ,
 743        "->": TokenType.ARROW,
 744        "->>": TokenType.DARROW,
 745        "=>": TokenType.FARROW,
 746        "#>": TokenType.HASH_ARROW,
 747        "#>>": TokenType.DHASH_ARROW,
 748        "<->": TokenType.LR_ARROW,
 749        "&&": TokenType.DAMP,
 750        "??": TokenType.DQMARK,
 751        "~~~": TokenType.GLOB,
 752        "~~": TokenType.LIKE,
 753        "~~*": TokenType.ILIKE,
 754        "~*": TokenType.IRLIKE,
 755        "-|-": TokenType.ADJACENT,
 756        "ALL": TokenType.ALL,
 757        "AND": TokenType.AND,
 758        "ANTI": TokenType.ANTI,
 759        "ANY": TokenType.ANY,
 760        "ASC": TokenType.ASC,
 761        "AS": TokenType.ALIAS,
 762        "ASOF": TokenType.ASOF,
 763        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 764        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 765        "BEGIN": TokenType.BEGIN,
 766        "BETWEEN": TokenType.BETWEEN,
 767        "CACHE": TokenType.CACHE,
 768        "UNCACHE": TokenType.UNCACHE,
 769        "CASE": TokenType.CASE,
 770        "CHARACTER SET": TokenType.CHARACTER_SET,
 771        "CLUSTER BY": TokenType.CLUSTER_BY,
 772        "COLLATE": TokenType.COLLATE,
 773        "COLUMN": TokenType.COLUMN,
 774        "COMMIT": TokenType.COMMIT,
 775        "CONNECT BY": TokenType.CONNECT_BY,
 776        "CONSTRAINT": TokenType.CONSTRAINT,
 777        "COPY": TokenType.COPY,
 778        "CREATE": TokenType.CREATE,
 779        "CROSS": TokenType.CROSS,
 780        "CUBE": TokenType.CUBE,
 781        "CURRENT_DATE": TokenType.CURRENT_DATE,
 782        "CURRENT_SCHEMA": TokenType.CURRENT_SCHEMA,
 783        "CURRENT_TIME": TokenType.CURRENT_TIME,
 784        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 785        "CURRENT_USER": TokenType.CURRENT_USER,
 786        "CURRENT_CATALOG": TokenType.CURRENT_CATALOG,
 787        "DATABASE": TokenType.DATABASE,
 788        "DEFAULT": TokenType.DEFAULT,
 789        "DELETE": TokenType.DELETE,
 790        "DESC": TokenType.DESC,
 791        "DESCRIBE": TokenType.DESCRIBE,
 792        "DISTINCT": TokenType.DISTINCT,
 793        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 794        "DIV": TokenType.DIV,
 795        "DROP": TokenType.DROP,
 796        "ELSE": TokenType.ELSE,
 797        "END": TokenType.END,
 798        "ENUM": TokenType.ENUM,
 799        "ESCAPE": TokenType.ESCAPE,
 800        "EXCEPT": TokenType.EXCEPT,
 801        "EXECUTE": TokenType.EXECUTE,
 802        "EXISTS": TokenType.EXISTS,
 803        "FALSE": TokenType.FALSE,
 804        "FETCH": TokenType.FETCH,
 805        "FILTER": TokenType.FILTER,
 806        "FILE": TokenType.FILE,
 807        "FIRST": TokenType.FIRST,
 808        "FULL": TokenType.FULL,
 809        "FUNCTION": TokenType.FUNCTION,
 810        "FOR": TokenType.FOR,
 811        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 812        "FORMAT": TokenType.FORMAT,
 813        "FROM": TokenType.FROM,
 814        "GEOGRAPHY": TokenType.GEOGRAPHY,
 815        "GEOMETRY": TokenType.GEOMETRY,
 816        "GLOB": TokenType.GLOB,
 817        "GROUP BY": TokenType.GROUP_BY,
 818        "GROUPING SETS": TokenType.GROUPING_SETS,
 819        "HAVING": TokenType.HAVING,
 820        "ILIKE": TokenType.ILIKE,
 821        "IN": TokenType.IN,
 822        "INDEX": TokenType.INDEX,
 823        "INET": TokenType.INET,
 824        "INNER": TokenType.INNER,
 825        "INSERT": TokenType.INSERT,
 826        "INTERVAL": TokenType.INTERVAL,
 827        "INTERSECT": TokenType.INTERSECT,
 828        "INTO": TokenType.INTO,
 829        "IS": TokenType.IS,
 830        "ISNULL": TokenType.ISNULL,
 831        "JOIN": TokenType.JOIN,
 832        "KEEP": TokenType.KEEP,
 833        "KILL": TokenType.KILL,
 834        "LATERAL": TokenType.LATERAL,
 835        "LEFT": TokenType.LEFT,
 836        "LIKE": TokenType.LIKE,
 837        "LIMIT": TokenType.LIMIT,
 838        "LOAD": TokenType.LOAD,
 839        "LOCALTIME": TokenType.LOCALTIME,
 840        "LOCALTIMESTAMP": TokenType.LOCALTIMESTAMP,
 841        "LOCK": TokenType.LOCK,
 842        "MERGE": TokenType.MERGE,
 843        "NAMESPACE": TokenType.NAMESPACE,
 844        "NATURAL": TokenType.NATURAL,
 845        "NEXT": TokenType.NEXT,
 846        "NOT": TokenType.NOT,
 847        "NOTNULL": TokenType.NOTNULL,
 848        "NULL": TokenType.NULL,
 849        "OBJECT": TokenType.OBJECT,
 850        "OFFSET": TokenType.OFFSET,
 851        "ON": TokenType.ON,
 852        "OR": TokenType.OR,
 853        "XOR": TokenType.XOR,
 854        "ORDER BY": TokenType.ORDER_BY,
 855        "ORDINALITY": TokenType.ORDINALITY,
 856        "OUT": TokenType.OUT,
 857        "OUTER": TokenType.OUTER,
 858        "OVER": TokenType.OVER,
 859        "OVERLAPS": TokenType.OVERLAPS,
 860        "OVERWRITE": TokenType.OVERWRITE,
 861        "PARTITION": TokenType.PARTITION,
 862        "PARTITION BY": TokenType.PARTITION_BY,
 863        "PARTITIONED BY": TokenType.PARTITION_BY,
 864        "PARTITIONED_BY": TokenType.PARTITION_BY,
 865        "PERCENT": TokenType.PERCENT,
 866        "PIVOT": TokenType.PIVOT,
 867        "PRAGMA": TokenType.PRAGMA,
 868        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 869        "PROCEDURE": TokenType.PROCEDURE,
 870        "OPERATOR": TokenType.OPERATOR,
 871        "QUALIFY": TokenType.QUALIFY,
 872        "RANGE": TokenType.RANGE,
 873        "RECURSIVE": TokenType.RECURSIVE,
 874        "REGEXP": TokenType.RLIKE,
 875        "RENAME": TokenType.RENAME,
 876        "REPLACE": TokenType.REPLACE,
 877        "RETURNING": TokenType.RETURNING,
 878        "REFERENCES": TokenType.REFERENCES,
 879        "RIGHT": TokenType.RIGHT,
 880        "RLIKE": TokenType.RLIKE,
 881        "ROLLBACK": TokenType.ROLLBACK,
 882        "ROLLUP": TokenType.ROLLUP,
 883        "ROW": TokenType.ROW,
 884        "ROWS": TokenType.ROWS,
 885        "SCHEMA": TokenType.SCHEMA,
 886        "SELECT": TokenType.SELECT,
 887        "SEMI": TokenType.SEMI,
 888        "SESSION": TokenType.SESSION,
 889        "SESSION_USER": TokenType.SESSION_USER,
 890        "SET": TokenType.SET,
 891        "SETTINGS": TokenType.SETTINGS,
 892        "SHOW": TokenType.SHOW,
 893        "SIMILAR TO": TokenType.SIMILAR_TO,
 894        "SOME": TokenType.SOME,
 895        "SORT BY": TokenType.SORT_BY,
 896        "START WITH": TokenType.START_WITH,
 897        "STRAIGHT_JOIN": TokenType.STRAIGHT_JOIN,
 898        "TABLE": TokenType.TABLE,
 899        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 900        "TEMP": TokenType.TEMPORARY,
 901        "TEMPORARY": TokenType.TEMPORARY,
 902        "THEN": TokenType.THEN,
 903        "TRUE": TokenType.TRUE,
 904        "TRUNCATE": TokenType.TRUNCATE,
 905        "UNION": TokenType.UNION,
 906        "UNKNOWN": TokenType.UNKNOWN,
 907        "UNNEST": TokenType.UNNEST,
 908        "UNPIVOT": TokenType.UNPIVOT,
 909        "UPDATE": TokenType.UPDATE,
 910        "USE": TokenType.USE,
 911        "USING": TokenType.USING,
 912        "UUID": TokenType.UUID,
 913        "VALUES": TokenType.VALUES,
 914        "VIEW": TokenType.VIEW,
 915        "VOLATILE": TokenType.VOLATILE,
 916        "WHEN": TokenType.WHEN,
 917        "WHERE": TokenType.WHERE,
 918        "WINDOW": TokenType.WINDOW,
 919        "WITH": TokenType.WITH,
 920        "APPLY": TokenType.APPLY,
 921        "ARRAY": TokenType.ARRAY,
 922        "BIT": TokenType.BIT,
 923        "BOOL": TokenType.BOOLEAN,
 924        "BOOLEAN": TokenType.BOOLEAN,
 925        "BYTE": TokenType.TINYINT,
 926        "MEDIUMINT": TokenType.MEDIUMINT,
 927        "INT1": TokenType.TINYINT,
 928        "TINYINT": TokenType.TINYINT,
 929        "INT16": TokenType.SMALLINT,
 930        "SHORT": TokenType.SMALLINT,
 931        "SMALLINT": TokenType.SMALLINT,
 932        "HUGEINT": TokenType.INT128,
 933        "UHUGEINT": TokenType.UINT128,
 934        "INT2": TokenType.SMALLINT,
 935        "INTEGER": TokenType.INT,
 936        "INT": TokenType.INT,
 937        "INT4": TokenType.INT,
 938        "INT32": TokenType.INT,
 939        "INT64": TokenType.BIGINT,
 940        "INT128": TokenType.INT128,
 941        "INT256": TokenType.INT256,
 942        "LONG": TokenType.BIGINT,
 943        "BIGINT": TokenType.BIGINT,
 944        "INT8": TokenType.TINYINT,
 945        "UINT": TokenType.UINT,
 946        "UINT128": TokenType.UINT128,
 947        "UINT256": TokenType.UINT256,
 948        "DEC": TokenType.DECIMAL,
 949        "DECIMAL": TokenType.DECIMAL,
 950        "DECIMAL32": TokenType.DECIMAL32,
 951        "DECIMAL64": TokenType.DECIMAL64,
 952        "DECIMAL128": TokenType.DECIMAL128,
 953        "DECIMAL256": TokenType.DECIMAL256,
 954        "DECFLOAT": TokenType.DECFLOAT,
 955        "BIGDECIMAL": TokenType.BIGDECIMAL,
 956        "BIGNUMERIC": TokenType.BIGDECIMAL,
 957        "BIGNUM": TokenType.BIGNUM,
 958        "LIST": TokenType.LIST,
 959        "MAP": TokenType.MAP,
 960        "NULLABLE": TokenType.NULLABLE,
 961        "NUMBER": TokenType.DECIMAL,
 962        "NUMERIC": TokenType.DECIMAL,
 963        "FIXED": TokenType.DECIMAL,
 964        "REAL": TokenType.FLOAT,
 965        "FLOAT": TokenType.FLOAT,
 966        "FLOAT4": TokenType.FLOAT,
 967        "FLOAT8": TokenType.DOUBLE,
 968        "DOUBLE": TokenType.DOUBLE,
 969        "DOUBLE PRECISION": TokenType.DOUBLE,
 970        "JSON": TokenType.JSON,
 971        "JSONB": TokenType.JSONB,
 972        "CHAR": TokenType.CHAR,
 973        "CHARACTER": TokenType.CHAR,
 974        "CHAR VARYING": TokenType.VARCHAR,
 975        "CHARACTER VARYING": TokenType.VARCHAR,
 976        "NCHAR": TokenType.NCHAR,
 977        "VARCHAR": TokenType.VARCHAR,
 978        "VARCHAR2": TokenType.VARCHAR,
 979        "NVARCHAR": TokenType.NVARCHAR,
 980        "NVARCHAR2": TokenType.NVARCHAR,
 981        "BPCHAR": TokenType.BPCHAR,
 982        "STR": TokenType.TEXT,
 983        "STRING": TokenType.TEXT,
 984        "TEXT": TokenType.TEXT,
 985        "LONGTEXT": TokenType.LONGTEXT,
 986        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
 987        "TINYTEXT": TokenType.TINYTEXT,
 988        "CLOB": TokenType.TEXT,
 989        "LONGVARCHAR": TokenType.TEXT,
 990        "BINARY": TokenType.BINARY,
 991        "BLOB": TokenType.VARBINARY,
 992        "LONGBLOB": TokenType.LONGBLOB,
 993        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
 994        "TINYBLOB": TokenType.TINYBLOB,
 995        "BYTEA": TokenType.VARBINARY,
 996        "VARBINARY": TokenType.VARBINARY,
 997        "TIME": TokenType.TIME,
 998        "TIMETZ": TokenType.TIMETZ,
 999        "TIME_NS": TokenType.TIME_NS,
1000        "TIMESTAMP": TokenType.TIMESTAMP,
1001        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
1002        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
1003        "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
1004        "TIMESTAMPNTZ": TokenType.TIMESTAMPNTZ,
1005        "TIMESTAMP_NTZ": TokenType.TIMESTAMPNTZ,
1006        "DATE": TokenType.DATE,
1007        "DATETIME": TokenType.DATETIME,
1008        "INT4RANGE": TokenType.INT4RANGE,
1009        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
1010        "INT8RANGE": TokenType.INT8RANGE,
1011        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
1012        "NUMRANGE": TokenType.NUMRANGE,
1013        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
1014        "TSRANGE": TokenType.TSRANGE,
1015        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
1016        "TSTZRANGE": TokenType.TSTZRANGE,
1017        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
1018        "DATERANGE": TokenType.DATERANGE,
1019        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
1020        "UNIQUE": TokenType.UNIQUE,
1021        "VECTOR": TokenType.VECTOR,
1022        "STRUCT": TokenType.STRUCT,
1023        "SEQUENCE": TokenType.SEQUENCE,
1024        "VARIANT": TokenType.VARIANT,
1025        "ALTER": TokenType.ALTER,
1026        "ANALYZE": TokenType.ANALYZE,
1027        "CALL": TokenType.COMMAND,
1028        "COMMENT": TokenType.COMMENT,
1029        "EXPLAIN": TokenType.COMMAND,
1030        "GRANT": TokenType.GRANT,
1031        "REVOKE": TokenType.REVOKE,
1032        "OPTIMIZE": TokenType.COMMAND,
1033        "PREPARE": TokenType.COMMAND,
1034        "VACUUM": TokenType.COMMAND,
1035        "USER-DEFINED": TokenType.USERDEFINED,
1036        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
1037        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
1038    }
1039
1040    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
1041        " ": TokenType.SPACE,
1042        "\t": TokenType.SPACE,
1043        "\n": TokenType.BREAK,
1044        "\r": TokenType.BREAK,
1045    }
1046
1047    COMMANDS = {
1048        TokenType.COMMAND,
1049        TokenType.EXECUTE,
1050        TokenType.FETCH,
1051        TokenType.SHOW,
1052        TokenType.RENAME,
1053    }
1054
1055    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
1056
1057    # Handle numeric literals like Hive's (e.g. 3L = BIGINT)
1058    NUMERIC_LITERALS: t.Dict[str, str] = {}
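    # Illustrative sketch (not part of the source): a dialect maps literal suffixes to
    # type keywords, so with a Hive-style mapping, 3L tokenizes as NUMBER '3',
    # DCOLON '::', BIGINT 'L' (see _scan_number below):
    #
    #     class MyTokenizer(Tokenizer):
    #         NUMERIC_LITERALS = {"L": "BIGINT"}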
1059
1060    COMMENTS = ["--", ("/*", "*/")]
1061
1062    __slots__ = (
1063        "sql",
1064        "size",
1065        "tokens",
1066        "dialect",
1067        "use_rs_tokenizer",
1068        "_start",
1069        "_current",
1070        "_line",
1071        "_col",
1072        "_comments",
1073        "_char",
1074        "_end",
1075        "_peek",
1076        "_prev_token_line",
1077        "_rs_dialect_settings",
1078    )
1079
1080    def __init__(
1081        self,
1082        dialect: DialectType = None,
1083        use_rs_tokenizer: t.Optional[bool] = None,
1084        **opts: t.Any,
1085    ) -> None:
1086        from sqlglot.dialects import Dialect
1087
1088        self.dialect = Dialect.get_or_raise(dialect)
1089
1090        # Initialize `use_rs_tokenizer`, allowing it to be overridden per Tokenizer instance
1091        self.use_rs_tokenizer = (
1092            use_rs_tokenizer if use_rs_tokenizer is not None else USE_RS_TOKENIZER
1093        )
1094
1095        if self.use_rs_tokenizer:
1096            self._rs_dialect_settings = RsTokenizerDialectSettings(
1097                unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES,
1098                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
1099                numbers_can_be_underscore_separated=self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED,
1100            )
1101
1102        self.reset()
1103
1104    def reset(self) -> None:
1105        self.sql = ""
1106        self.size = 0
1107        self.tokens: t.List[Token] = []
1108        self._start = 0
1109        self._current = 0
1110        self._line = 1
1111        self._col = 0
1112        self._comments: t.List[str] = []
1113
1114        self._char = ""
1115        self._end = False
1116        self._peek = ""
1117        self._prev_token_line = -1
1118
1119    def tokenize(self, sql: str) -> t.List[Token]:
1120        """Returns a list of tokens corresponding to the SQL string `sql`."""
1121        if self.use_rs_tokenizer:
1122            return self.tokenize_rs(sql)
1123
1124        self.reset()
1125        self.sql = sql
1126        self.size = len(sql)
1127
1128        try:
1129            self._scan()
1130        except Exception as e:
1131            start = max(self._current - 50, 0)
1132            end = min(self._current + 50, self.size - 1)
1133            context = self.sql[start:end]
1134            raise TokenError(f"Error tokenizing '{context}'") from e
1135
1136        return self.tokens
1137
1138    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
1139        while self.size and not self._end:
1140            current = self._current
1141
1142            # Skip spaces here rather than iteratively calling advance() for performance reasons
1143            while current < self.size:
1144                char = self.sql[current]
1145
1146                if char.isspace() and (char == " " or char == "\t"):
1147                    current += 1
1148                else:
1149                    break
1150
1151            offset = current - self._current if current > self._current else 1
1152
1153            self._start = current
1154            self._advance(offset)
1155
1156            if not self._char.isspace():
1157                if self._char.isdigit():
1158                    self._scan_number()
1159                elif self._char in self._IDENTIFIERS:
1160                    self._scan_identifier(self._IDENTIFIERS[self._char])
1161                else:
1162                    self._scan_keywords()
1163
1164            if until and until():
1165                break
1166
1167        if self.tokens and self._comments:
1168            self.tokens[-1].comments.extend(self._comments)
1169
1170    def _chars(self, size: int) -> str:
1171        if size == 1:
1172            return self._char
1173
1174        start = self._current - 1
1175        end = start + size
1176
1177        return self.sql[start:end] if end <= self.size else ""
1178
1179    def _advance(self, i: int = 1, alnum: bool = False) -> None:
1180        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
1181            # Ensures we don't count an extra line if we get a \r\n line break sequence
1182            if not (self._char == "\r" and self._peek == "\n"):
1183                self._col = i
1184                self._line += 1
1185        else:
1186            self._col += i
1187
1188        self._current += i
1189        self._end = self._current >= self.size
1190        self._char = self.sql[self._current - 1]
1191        self._peek = "" if self._end else self.sql[self._current]
1192
1193        if alnum and self._char.isalnum():
1194            # Here we use local variables instead of attributes for better performance
1195            _col = self._col
1196            _current = self._current
1197            _end = self._end
1198            _peek = self._peek
1199
1200            while _peek.isalnum():
1201                _col += 1
1202                _current += 1
1203                _end = _current >= self.size
1204                _peek = "" if _end else self.sql[_current]
1205
1206            self._col = _col
1207            self._current = _current
1208            self._end = _end
1209            self._peek = _peek
1210            self._char = self.sql[_current - 1]
1211
1212    @property
1213    def _text(self) -> str:
1214        return self.sql[self._start : self._current]
1215
1216    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
1217        self._prev_token_line = self._line
1218
1219        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
1220            self.tokens[-1].comments.extend(self._comments)
1221            self._comments = []
1222
1223        self.tokens.append(
1224            Token(
1225                token_type,
1226                text=self._text if text is None else text,
1227                line=self._line,
1228                col=self._col,
1229                start=self._start,
1230                end=self._current - 1,
1231                comments=self._comments,
1232            )
1233        )
1234        self._comments = []
1235
1236        # If the command token is the first token or is preceded by a semicolon or BEGIN
1237        # token, we tokenize everything that follows it, up to the next semicolon, as a string
1238        if (
1239            token_type in self.COMMANDS
1240            and self._peek != ";"
1241            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
1242        ):
1243            start = self._current
1244            tokens = len(self.tokens)
1245            self._scan(lambda: self._peek == ";")
1246            self.tokens = self.tokens[:tokens]
1247            text = self.sql[start : self._current].strip()
1248            if text:
1249                self._add(TokenType.STRING, text)
1250
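    # Illustrative (not part of the source): everything that follows a command keyword
    # is collapsed into a single STRING token, e.g.
    #
    #     Tokenizer().tokenize("EXPLAIN SELECT 1")
    #     # [<Token ... COMMAND, text: EXPLAIN ...>, <Token ... STRING, text: SELECT 1 ...>]
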
1251    def _scan_keywords(self) -> None:
1252        size = 0
1253        word = None
1254        chars = self._text
1255        char = chars
1256        prev_space = False
1257        skip = False
1258        trie = self._KEYWORD_TRIE
1259        single_token = char in self.SINGLE_TOKENS
1260
1261        while chars:
1262            if skip:
1263                result = TrieResult.PREFIX
1264            else:
1265                result, trie = in_trie(trie, char.upper())
1266
1267            if result == TrieResult.FAILED:
1268                break
1269            if result == TrieResult.EXISTS:
1270                word = chars
1271
1272            end = self._current + size
1273            size += 1
1274
1275            if end < self.size:
1276                char = self.sql[end]
1277                single_token = single_token or char in self.SINGLE_TOKENS
1278                is_space = char.isspace()
1279
1280                if not is_space or not prev_space:
1281                    if is_space:
1282                        char = " "
1283                    chars += char
1284                    prev_space = is_space
1285                    skip = False
1286                else:
1287                    skip = True
1288            else:
1289                char = ""
1290                break
1291
1292        if word:
1293            if self._scan_string(word):
1294                return
1295            if self._scan_comment(word):
1296                return
1297            if prev_space or single_token or not char:
1298                self._advance(size - 1)
1299                word = word.upper()
1300                self._add(self.KEYWORDS[word], text=word)
1301                return
1302
1303        if self._char in self.SINGLE_TOKENS:
1304            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
1305            return
1306
1307        self._scan_var()
1308
1309    def _scan_comment(self, comment_start: str) -> bool:
1310        if comment_start not in self._COMMENTS:
1311            return False
1312
1313        comment_start_line = self._line
1314        comment_start_size = len(comment_start)
1315        comment_end = self._COMMENTS[comment_start]
1316
1317        if comment_end:
1318            # Skip the comment's start delimiter
1319            self._advance(comment_start_size)
1320
1321            comment_count = 1
1322            comment_end_size = len(comment_end)
1323
1324            while not self._end:
1325                if self._chars(comment_end_size) == comment_end:
1326                    comment_count -= 1
1327                    if not comment_count:
1328                        break
1329
1330                self._advance(alnum=True)
1331
1332                # Nested comments are allowed by some dialects, e.g. Databricks, DuckDB, Postgres
1333                if (
1334                    self.NESTED_COMMENTS
1335                    and not self._end
1336                    and self._chars(comment_end_size) == comment_start
1337                ):
1338                    self._advance(comment_start_size)
1339                    comment_count += 1
1340
1341            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
1342            self._advance(comment_end_size - 1)
1343        else:
1344            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
1345                self._advance(alnum=True)
1346            self._comments.append(self._text[comment_start_size:])
1347
1348        if (
1349            comment_start == self.HINT_START
1350            and self.tokens
1351            and self.tokens[-1].token_type in self.TOKENS_PRECEDING_HINT
1352        ):
1353            self._add(TokenType.HINT)
1354
1355        # A leading comment is attached to the succeeding token, while a trailing comment is attached to the preceding one.
1356        # Multiple consecutive comments are preserved by appending them to the current comments list.
1357        if comment_start_line == self._prev_token_line:
1358            self.tokens[-1].comments.extend(self._comments)
1359            self._comments = []
1360            self._prev_token_line = self._line
1361
1362        return True
1363
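    # Illustrative (not part of the source): a comment on the same line as the previous
    # token is attached to that token, e.g.
    #
    #     Tokenizer().tokenize("SELECT 1 /* one */")[-1].comments  # [' one ']
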
1364    def _scan_number(self) -> None:
1365        if self._char == "0":
1366            peek = self._peek.upper()
1367            if peek == "B":
1368                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
1369            elif peek == "X":
1370                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
1371
1372        decimal = False
1373        scientific = 0
1374
1375        while True:
1376            if self._peek.isdigit():
1377                self._advance()
1378            elif self._peek == "." and not decimal:
1379                if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER:
1380                    return self._add(TokenType.NUMBER)
1381                decimal = True
1382                self._advance()
1383            elif self._peek in ("-", "+") and scientific == 1:
1384                # Only consume +/- if followed by a digit
1385                if self._current + 1 < self.size and self.sql[self._current + 1].isdigit():
1386                    scientific += 1
1387                    self._advance()
1388                else:
1389                    return self._add(TokenType.NUMBER)
1390            elif self._peek.upper() == "E" and not scientific:
1391                scientific += 1
1392                self._advance()
1393            elif self._peek == "_" and self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED:
1394                self._advance()
1395            elif self._peek.isidentifier():
1396                number_text = self._text
1397                literal = ""
1398
1399                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1400                    literal += self._peek
1401                    self._advance()
1402
1403                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))
1404
1405                if token_type:
1406                    self._add(TokenType.NUMBER, number_text)
1407                    self._add(TokenType.DCOLON, "::")
1408                    return self._add(token_type, literal)
1409                elif self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
1410                    return self._add(TokenType.VAR)
1411
1412                self._advance(-len(literal))
1413                return self._add(TokenType.NUMBER, number_text)
1414            else:
1415                return self._add(TokenType.NUMBER)
1416
1417    def _scan_bits(self) -> None:
1418        self._advance()
1419        value = self._extract_value()
1420        try:
1421            # If `value` can't be converted to binary, fall back to tokenizing it as an identifier
1422            int(value, 2)
1423            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1424        except ValueError:
1425            self._add(TokenType.IDENTIFIER)
1426
1427    def _scan_hex(self) -> None:
1428        self._advance()
1429        value = self._extract_value()
1430        try:
1431            # If `value` can't be converted to hex, fall back to tokenizing it as an identifier
1432            int(value, 16)
1433            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1434        except ValueError:
1435            self._add(TokenType.IDENTIFIER)
1436
1437    def _extract_value(self) -> str:
1438        while True:
1439            char = self._peek.strip()
1440            if char and char not in self.SINGLE_TOKENS:
1441                self._advance(alnum=True)
1442            else:
1443                break
1444
1445        return self._text
1446
1447    def _scan_string(self, start: str) -> bool:
1448        base = None
1449        token_type = TokenType.STRING
1450
1451        if start in self._QUOTES:
1452            end = self._QUOTES[start]
1453        elif start in self._FORMAT_STRINGS:
1454            end, token_type = self._FORMAT_STRINGS[start]
1455
1456            if token_type == TokenType.HEX_STRING:
1457                base = 16
1458            elif token_type == TokenType.BIT_STRING:
1459                base = 2
1460            elif token_type == TokenType.HEREDOC_STRING:
1461                self._advance()
1462
1463                if self._char == end:
1464                    tag = ""
1465                else:
1466                    tag = self._extract_string(
1467                        end,
1468                        raw_string=True,
1469                        raise_unmatched=not self.HEREDOC_TAG_IS_IDENTIFIER,
1470                    )
1471
1472                if (
1473                    tag
1474                    and self.HEREDOC_TAG_IS_IDENTIFIER
1475                    and (self._end or tag.isdigit() or any(c.isspace() for c in tag))
1476                ):
1477                    if not self._end:
1478                        self._advance(-1)
1479
1480                    self._advance(-len(tag))
1481                    self._add(self.HEREDOC_STRING_ALTERNATIVE)
1482                    return True
1483
1484                end = f"{start}{tag}{end}"
1485        else:
1486            return False
1487
1488        self._advance(len(start))
1489        text = self._extract_string(
1490            end,
1491            escapes=(
1492                self._BYTE_STRING_ESCAPES
1493                if token_type == TokenType.BYTE_STRING
1494                else self._STRING_ESCAPES
1495            ),
1496            raw_string=token_type == TokenType.RAW_STRING,
1497        )
1498
1499        if base and text:
1500            try:
1501                int(text, base)
1502            except Exception:
1503                raise TokenError(
1504                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1505                )
1506
1507        self._add(token_type, text)
1508        return True
1509
1510    def _scan_identifier(self, identifier_end: str) -> None:
1511        self._advance()
1512        text = self._extract_string(
1513            identifier_end, escapes=self._IDENTIFIER_ESCAPES | {identifier_end}
1514        )
1515        self._add(TokenType.IDENTIFIER, text)
1516
1517    def _scan_var(self) -> None:
1518        while True:
1519            char = self._peek.strip()
1520            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1521                self._advance(alnum=True)
1522            else:
1523                break
1524
1525        self._add(
1526            TokenType.VAR
1527            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1528            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1529        )
1530
1531    def _extract_string(
1532        self,
1533        delimiter: str,
1534        escapes: t.Optional[t.Set[str]] = None,
1535        raw_string: bool = False,
1536        raise_unmatched: bool = True,
1537    ) -> str:
1538        text = ""
1539        delim_size = len(delimiter)
1540        escapes = self._STRING_ESCAPES if escapes is None else escapes
1541
1542        while True:
1543            if (
1544                not raw_string
1545                and self.dialect.UNESCAPED_SEQUENCES
1546                and self._peek
1547                and self._char in escapes
1548            ):
1549                unescaped_sequence = self.dialect.UNESCAPED_SEQUENCES.get(self._char + self._peek)
1550                if unescaped_sequence:
1551                    self._advance(2)
1552                    text += unescaped_sequence
1553                    continue
1554
1555            is_valid_custom_escape = (
1556                self.ESCAPE_FOLLOW_CHARS
1557                and self._char == "\\"
1558                and self._peek not in self.ESCAPE_FOLLOW_CHARS
1559            )
1560
1561            if (
1562                (self.STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS or not raw_string)
1563                and self._char in escapes
1564                and (self._peek == delimiter or self._peek in escapes or is_valid_custom_escape)
1565                and (self._char not in self._QUOTES or self._char == self._peek)
1566            ):
1567                if self._peek == delimiter:
1568                    text += self._peek
1569                elif is_valid_custom_escape and self._char != self._peek:
1570                    text += self._peek
1571                else:
1572                    text += self._char + self._peek
1573
1574                if self._current + 1 < self.size:
1575                    self._advance(2)
1576                else:
1577                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
1578            else:
1579                if self._chars(delim_size) == delimiter:
1580                    if delim_size > 1:
1581                        self._advance(delim_size - 1)
1582                    break
1583
1584                if self._end:
1585                    if not raise_unmatched:
1586                        return text + self._char
1587
1588                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")
1589
1590                current = self._current - 1
1591                self._advance(alnum=True)
1592                text += self.sql[current : self._current - 1]
1593
1594        return text
1595
1596    def tokenize_rs(self, sql: str) -> t.List[Token]:
1597        if not self._RS_TOKENIZER:
1598            raise SqlglotError("Rust tokenizer is not available")
1599
1600        tokens, error_msg = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
1601        for token in tokens:
1602            token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
1603
1604        # Setting this here so partial token lists can be inspected even if there is a failure
1605        self.tokens = tokens
1606
1607        if error_msg is not None:
1608            raise TokenError(error_msg)
1609
1610        return tokens
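
As a quick orientation before the per-class reference below, here is a minimal usage sketch (the query text is illustrative): tokenize a statement with the pure-Python tokenizer and inspect the resulting tokens. Passing use_rs_tokenizer=False forces the Python implementation even when sqlglotrs is installed.

from sqlglot.tokens import Tokenizer, TokenType

# Force the pure-Python path; omit the flag to use sqlglotrs when available.
tokenizer = Tokenizer(use_rs_tokenizer=False)
tokens = tokenizer.tokenize("SELECT a FROM b")

for token in tokens:
    print(token.token_type, repr(token.text), token.line, token.col)

# The first token is the SELECT keyword.
assert tokens[0].token_type == TokenType.SELECT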
class TokenType(sqlglot.helper.AutoName):
 29class TokenType(AutoName):
 30    L_PAREN = auto()
 31    R_PAREN = auto()
 32    L_BRACKET = auto()
 33    R_BRACKET = auto()
 34    L_BRACE = auto()
 35    R_BRACE = auto()
 36    COMMA = auto()
 37    DOT = auto()
 38    DASH = auto()
 39    PLUS = auto()
 40    COLON = auto()
 41    DOTCOLON = auto()
 42    DCOLON = auto()
 43    DCOLONDOLLAR = auto()
 44    DCOLONPERCENT = auto()
 45    DCOLONQMARK = auto()
 46    DQMARK = auto()
 47    SEMICOLON = auto()
 48    STAR = auto()
 49    BACKSLASH = auto()
 50    SLASH = auto()
 51    LT = auto()
 52    LTE = auto()
 53    GT = auto()
 54    GTE = auto()
 55    NOT = auto()
 56    EQ = auto()
 57    NEQ = auto()
 58    NULLSAFE_EQ = auto()
 59    COLON_EQ = auto()
 60    COLON_GT = auto()
 61    NCOLON_GT = auto()
 62    AND = auto()
 63    OR = auto()
 64    AMP = auto()
 65    DPIPE = auto()
 66    PIPE_GT = auto()
 67    PIPE = auto()
 68    PIPE_SLASH = auto()
 69    DPIPE_SLASH = auto()
 70    CARET = auto()
 71    CARET_AT = auto()
 72    TILDE = auto()
 73    ARROW = auto()
 74    DARROW = auto()
 75    FARROW = auto()
 76    HASH = auto()
 77    HASH_ARROW = auto()
 78    DHASH_ARROW = auto()
 79    LR_ARROW = auto()
 80    DAT = auto()
 81    LT_AT = auto()
 82    AT_GT = auto()
 83    DOLLAR = auto()
 84    PARAMETER = auto()
 85    SESSION = auto()
 86    SESSION_PARAMETER = auto()
 87    SESSION_USER = auto()
 88    DAMP = auto()
 89    AMP_LT = auto()
 90    AMP_GT = auto()
 91    ADJACENT = auto()
 92    XOR = auto()
 93    DSTAR = auto()
 94    QMARK_AMP = auto()
 95    QMARK_PIPE = auto()
 96    HASH_DASH = auto()
 97    EXCLAMATION = auto()
 98
 99    URI_START = auto()
100
101    BLOCK_START = auto()
102    BLOCK_END = auto()
103
104    SPACE = auto()
105    BREAK = auto()
106
107    STRING = auto()
108    NUMBER = auto()
109    IDENTIFIER = auto()
110    DATABASE = auto()
111    COLUMN = auto()
112    COLUMN_DEF = auto()
113    SCHEMA = auto()
114    TABLE = auto()
115    WAREHOUSE = auto()
116    STAGE = auto()
117    STREAMLIT = auto()
118    VAR = auto()
119    BIT_STRING = auto()
120    HEX_STRING = auto()
121    BYTE_STRING = auto()
122    NATIONAL_STRING = auto()
123    RAW_STRING = auto()
124    HEREDOC_STRING = auto()
125    UNICODE_STRING = auto()
126
127    # types
128    BIT = auto()
129    BOOLEAN = auto()
130    TINYINT = auto()
131    UTINYINT = auto()
132    SMALLINT = auto()
133    USMALLINT = auto()
134    MEDIUMINT = auto()
135    UMEDIUMINT = auto()
136    INT = auto()
137    UINT = auto()
138    BIGINT = auto()
139    UBIGINT = auto()
140    BIGNUM = auto()  # unlimited precision int
141    INT128 = auto()
142    UINT128 = auto()
143    INT256 = auto()
144    UINT256 = auto()
145    FLOAT = auto()
146    DOUBLE = auto()
147    UDOUBLE = auto()
148    DECIMAL = auto()
149    DECIMAL32 = auto()
150    DECIMAL64 = auto()
151    DECIMAL128 = auto()
152    DECIMAL256 = auto()
153    DECFLOAT = auto()
154    UDECIMAL = auto()
155    BIGDECIMAL = auto()
156    CHAR = auto()
157    NCHAR = auto()
158    VARCHAR = auto()
159    NVARCHAR = auto()
160    BPCHAR = auto()
161    TEXT = auto()
162    MEDIUMTEXT = auto()
163    LONGTEXT = auto()
164    BLOB = auto()
165    MEDIUMBLOB = auto()
166    LONGBLOB = auto()
167    TINYBLOB = auto()
168    TINYTEXT = auto()
169    NAME = auto()
170    BINARY = auto()
171    VARBINARY = auto()
172    JSON = auto()
173    JSONB = auto()
174    TIME = auto()
175    TIMETZ = auto()
176    TIME_NS = auto()
177    TIMESTAMP = auto()
178    TIMESTAMPTZ = auto()
179    TIMESTAMPLTZ = auto()
180    TIMESTAMPNTZ = auto()
181    TIMESTAMP_S = auto()
182    TIMESTAMP_MS = auto()
183    TIMESTAMP_NS = auto()
184    DATETIME = auto()
185    DATETIME2 = auto()
186    DATETIME64 = auto()
187    SMALLDATETIME = auto()
188    DATE = auto()
189    DATE32 = auto()
190    INT4RANGE = auto()
191    INT4MULTIRANGE = auto()
192    INT8RANGE = auto()
193    INT8MULTIRANGE = auto()
194    NUMRANGE = auto()
195    NUMMULTIRANGE = auto()
196    TSRANGE = auto()
197    TSMULTIRANGE = auto()
198    TSTZRANGE = auto()
199    TSTZMULTIRANGE = auto()
200    DATERANGE = auto()
201    DATEMULTIRANGE = auto()
202    UUID = auto()
203    GEOGRAPHY = auto()
204    GEOGRAPHYPOINT = auto()
205    NULLABLE = auto()
206    GEOMETRY = auto()
207    POINT = auto()
208    RING = auto()
209    LINESTRING = auto()
210    LOCALTIME = auto()
211    LOCALTIMESTAMP = auto()
212    SYSTIMESTAMP = auto()
213    MULTILINESTRING = auto()
214    POLYGON = auto()
215    MULTIPOLYGON = auto()
216    HLLSKETCH = auto()
217    HSTORE = auto()
218    SUPER = auto()
219    SERIAL = auto()
220    SMALLSERIAL = auto()
221    BIGSERIAL = auto()
222    XML = auto()
223    YEAR = auto()
224    USERDEFINED = auto()
225    MONEY = auto()
226    SMALLMONEY = auto()
227    ROWVERSION = auto()
228    IMAGE = auto()
229    VARIANT = auto()
230    OBJECT = auto()
231    INET = auto()
232    IPADDRESS = auto()
233    IPPREFIX = auto()
234    IPV4 = auto()
235    IPV6 = auto()
236    ENUM = auto()
237    ENUM8 = auto()
238    ENUM16 = auto()
239    FIXEDSTRING = auto()
240    LOWCARDINALITY = auto()
241    NESTED = auto()
242    AGGREGATEFUNCTION = auto()
243    SIMPLEAGGREGATEFUNCTION = auto()
244    TDIGEST = auto()
245    UNKNOWN = auto()
246    VECTOR = auto()
247    DYNAMIC = auto()
248    VOID = auto()
249
250    # keywords
251    ALIAS = auto()
252    ALTER = auto()
253    ALL = auto()
254    ANTI = auto()
255    ANY = auto()
256    APPLY = auto()
257    ARRAY = auto()
258    ASC = auto()
259    ASOF = auto()
260    ATTACH = auto()
261    AUTO_INCREMENT = auto()
262    BEGIN = auto()
263    BETWEEN = auto()
264    BULK_COLLECT_INTO = auto()
265    CACHE = auto()
266    CASE = auto()
267    CHARACTER_SET = auto()
268    CLUSTER_BY = auto()
269    COLLATE = auto()
270    COMMAND = auto()
271    COMMENT = auto()
272    COMMIT = auto()
273    CONNECT_BY = auto()
274    CONSTRAINT = auto()
275    COPY = auto()
276    CREATE = auto()
277    CROSS = auto()
278    CUBE = auto()
279    CURRENT_DATE = auto()
280    CURRENT_DATETIME = auto()
281    CURRENT_SCHEMA = auto()
282    CURRENT_TIME = auto()
283    CURRENT_TIMESTAMP = auto()
284    CURRENT_USER = auto()
285    CURRENT_ROLE = auto()
286    CURRENT_CATALOG = auto()
287    DECLARE = auto()
288    DEFAULT = auto()
289    DELETE = auto()
290    DESC = auto()
291    DESCRIBE = auto()
292    DETACH = auto()
293    DICTIONARY = auto()
294    DISTINCT = auto()
295    DISTRIBUTE_BY = auto()
296    DIV = auto()
297    DROP = auto()
298    ELSE = auto()
299    END = auto()
300    ESCAPE = auto()
301    EXCEPT = auto()
302    EXECUTE = auto()
303    EXISTS = auto()
304    FALSE = auto()
305    FETCH = auto()
306    FILE = auto()
307    FILE_FORMAT = auto()
308    FILTER = auto()
309    FINAL = auto()
310    FIRST = auto()
311    FOR = auto()
312    FORCE = auto()
313    FOREIGN_KEY = auto()
314    FORMAT = auto()
315    FROM = auto()
316    FULL = auto()
317    FUNCTION = auto()
318    GET = auto()
319    GLOB = auto()
320    GLOBAL = auto()
321    GRANT = auto()
322    GROUP_BY = auto()
323    GROUPING_SETS = auto()
324    HAVING = auto()
325    HINT = auto()
326    IGNORE = auto()
327    ILIKE = auto()
328    IN = auto()
329    INDEX = auto()
330    INDEXED_BY = auto()
331    INNER = auto()
332    INSERT = auto()
333    INSTALL = auto()
334    INTERSECT = auto()
335    INTERVAL = auto()
336    INTO = auto()
337    INTRODUCER = auto()
338    IRLIKE = auto()
339    IS = auto()
340    ISNULL = auto()
341    JOIN = auto()
342    JOIN_MARKER = auto()
343    KEEP = auto()
344    KEY = auto()
345    KILL = auto()
346    LANGUAGE = auto()
347    LATERAL = auto()
348    LEFT = auto()
349    LIKE = auto()
350    LIMIT = auto()
351    LIST = auto()
352    LOAD = auto()
353    LOCK = auto()
354    MAP = auto()
355    MATCH = auto()
356    MATCH_CONDITION = auto()
357    MATCH_RECOGNIZE = auto()
358    MEMBER_OF = auto()
359    MERGE = auto()
360    MOD = auto()
361    MODEL = auto()
362    NATURAL = auto()
363    NEXT = auto()
364    NOTHING = auto()
365    NOTNULL = auto()
366    NULL = auto()
367    OBJECT_IDENTIFIER = auto()
368    OFFSET = auto()
369    ON = auto()
370    ONLY = auto()
371    OPERATOR = auto()
372    ORDER_BY = auto()
373    ORDER_SIBLINGS_BY = auto()
374    ORDERED = auto()
375    ORDINALITY = auto()
376    OUT = auto()
377    INOUT = auto()
378    OUTER = auto()
379    OVER = auto()
380    OVERLAPS = auto()
381    OVERWRITE = auto()
382    PARTITION = auto()
383    PARTITION_BY = auto()
384    PERCENT = auto()
385    PIVOT = auto()
386    PLACEHOLDER = auto()
387    POSITIONAL = auto()
388    PRAGMA = auto()
389    PREWHERE = auto()
390    PRIMARY_KEY = auto()
391    PROCEDURE = auto()
392    PROPERTIES = auto()
393    PSEUDO_TYPE = auto()
394    PUT = auto()
395    QUALIFY = auto()
396    QUOTE = auto()
397    QDCOLON = auto()
398    RANGE = auto()
399    RECURSIVE = auto()
400    REFRESH = auto()
401    RENAME = auto()
402    REPLACE = auto()
403    RETURNING = auto()
404    REVOKE = auto()
405    REFERENCES = auto()
406    RIGHT = auto()
407    RLIKE = auto()
408    ROLLBACK = auto()
409    ROLLUP = auto()
410    ROW = auto()
411    ROWS = auto()
412    SELECT = auto()
413    SEMI = auto()
414    SEPARATOR = auto()
415    SEQUENCE = auto()
416    SERDE_PROPERTIES = auto()
417    SET = auto()
418    SETTINGS = auto()
419    SHOW = auto()
420    SIMILAR_TO = auto()
421    SOME = auto()
422    SORT_BY = auto()
423    SOUNDS_LIKE = auto()
424    START_WITH = auto()
425    STORAGE_INTEGRATION = auto()
426    STRAIGHT_JOIN = auto()
427    STRUCT = auto()
428    SUMMARIZE = auto()
429    TABLE_SAMPLE = auto()
430    TAG = auto()
431    TEMPORARY = auto()
432    TOP = auto()
433    THEN = auto()
434    TRUE = auto()
435    TRUNCATE = auto()
436    UNCACHE = auto()
437    UNION = auto()
438    UNNEST = auto()
439    UNPIVOT = auto()
440    UPDATE = auto()
441    USE = auto()
442    USING = auto()
443    VALUES = auto()
444    VARIADIC = auto()
445    VIEW = auto()
446    SEMANTIC_VIEW = auto()
447    VOLATILE = auto()
448    WHEN = auto()
449    WHERE = auto()
450    WINDOW = auto()
451    WITH = auto()
452    UNIQUE = auto()
453    UTC_DATE = auto()
454    UTC_TIME = auto()
455    UTC_TIMESTAMP = auto()
456    VERSION_SNAPSHOT = auto()
457    TIMESTAMP_SNAPSHOT = auto()
458    OPTION = auto()
459    SINK = auto()
460    SOURCE = auto()
461    ANALYZE = auto()
462    NAMESPACE = auto()
463    EXPORT = auto()
464
465    # sentinel
466    HIVE_TOKEN_STREAM = auto()

An enumeration of all the token types that the tokenizer can emit.

L_PAREN = <TokenType.L_PAREN: 'L_PAREN'>
R_PAREN = <TokenType.R_PAREN: 'R_PAREN'>
L_BRACKET = <TokenType.L_BRACKET: 'L_BRACKET'>
R_BRACKET = <TokenType.R_BRACKET: 'R_BRACKET'>
L_BRACE = <TokenType.L_BRACE: 'L_BRACE'>
R_BRACE = <TokenType.R_BRACE: 'R_BRACE'>
COMMA = <TokenType.COMMA: 'COMMA'>
DOT = <TokenType.DOT: 'DOT'>
DASH = <TokenType.DASH: 'DASH'>
PLUS = <TokenType.PLUS: 'PLUS'>
COLON = <TokenType.COLON: 'COLON'>
DOTCOLON = <TokenType.DOTCOLON: 'DOTCOLON'>
DCOLON = <TokenType.DCOLON: 'DCOLON'>
DCOLONDOLLAR = <TokenType.DCOLONDOLLAR: 'DCOLONDOLLAR'>
DCOLONPERCENT = <TokenType.DCOLONPERCENT: 'DCOLONPERCENT'>
DCOLONQMARK = <TokenType.DCOLONQMARK: 'DCOLONQMARK'>
DQMARK = <TokenType.DQMARK: 'DQMARK'>
SEMICOLON = <TokenType.SEMICOLON: 'SEMICOLON'>
STAR = <TokenType.STAR: 'STAR'>
BACKSLASH = <TokenType.BACKSLASH: 'BACKSLASH'>
SLASH = <TokenType.SLASH: 'SLASH'>
LT = <TokenType.LT: 'LT'>
LTE = <TokenType.LTE: 'LTE'>
GT = <TokenType.GT: 'GT'>
GTE = <TokenType.GTE: 'GTE'>
NOT = <TokenType.NOT: 'NOT'>
EQ = <TokenType.EQ: 'EQ'>
NEQ = <TokenType.NEQ: 'NEQ'>
NULLSAFE_EQ = <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>
COLON_EQ = <TokenType.COLON_EQ: 'COLON_EQ'>
COLON_GT = <TokenType.COLON_GT: 'COLON_GT'>
NCOLON_GT = <TokenType.NCOLON_GT: 'NCOLON_GT'>
AND = <TokenType.AND: 'AND'>
OR = <TokenType.OR: 'OR'>
AMP = <TokenType.AMP: 'AMP'>
DPIPE = <TokenType.DPIPE: 'DPIPE'>
PIPE_GT = <TokenType.PIPE_GT: 'PIPE_GT'>
PIPE = <TokenType.PIPE: 'PIPE'>
PIPE_SLASH = <TokenType.PIPE_SLASH: 'PIPE_SLASH'>
DPIPE_SLASH = <TokenType.DPIPE_SLASH: 'DPIPE_SLASH'>
CARET = <TokenType.CARET: 'CARET'>
CARET_AT = <TokenType.CARET_AT: 'CARET_AT'>
TILDE = <TokenType.TILDE: 'TILDE'>
ARROW = <TokenType.ARROW: 'ARROW'>
DARROW = <TokenType.DARROW: 'DARROW'>
FARROW = <TokenType.FARROW: 'FARROW'>
HASH = <TokenType.HASH: 'HASH'>
HASH_ARROW = <TokenType.HASH_ARROW: 'HASH_ARROW'>
DHASH_ARROW = <TokenType.DHASH_ARROW: 'DHASH_ARROW'>
LR_ARROW = <TokenType.LR_ARROW: 'LR_ARROW'>
DAT = <TokenType.DAT: 'DAT'>
LT_AT = <TokenType.LT_AT: 'LT_AT'>
AT_GT = <TokenType.AT_GT: 'AT_GT'>
DOLLAR = <TokenType.DOLLAR: 'DOLLAR'>
PARAMETER = <TokenType.PARAMETER: 'PARAMETER'>
SESSION = <TokenType.SESSION: 'SESSION'>
SESSION_PARAMETER = <TokenType.SESSION_PARAMETER: 'SESSION_PARAMETER'>
SESSION_USER = <TokenType.SESSION_USER: 'SESSION_USER'>
DAMP = <TokenType.DAMP: 'DAMP'>
AMP_LT = <TokenType.AMP_LT: 'AMP_LT'>
AMP_GT = <TokenType.AMP_GT: 'AMP_GT'>
ADJACENT = <TokenType.ADJACENT: 'ADJACENT'>
XOR = <TokenType.XOR: 'XOR'>
DSTAR = <TokenType.DSTAR: 'DSTAR'>
QMARK_AMP = <TokenType.QMARK_AMP: 'QMARK_AMP'>
QMARK_PIPE = <TokenType.QMARK_PIPE: 'QMARK_PIPE'>
HASH_DASH = <TokenType.HASH_DASH: 'HASH_DASH'>
EXCLAMATION = <TokenType.EXCLAMATION: 'EXCLAMATION'>
URI_START = <TokenType.URI_START: 'URI_START'>
BLOCK_START = <TokenType.BLOCK_START: 'BLOCK_START'>
BLOCK_END = <TokenType.BLOCK_END: 'BLOCK_END'>
SPACE = <TokenType.SPACE: 'SPACE'>
BREAK = <TokenType.BREAK: 'BREAK'>
STRING = <TokenType.STRING: 'STRING'>
NUMBER = <TokenType.NUMBER: 'NUMBER'>
IDENTIFIER = <TokenType.IDENTIFIER: 'IDENTIFIER'>
DATABASE = <TokenType.DATABASE: 'DATABASE'>
COLUMN = <TokenType.COLUMN: 'COLUMN'>
COLUMN_DEF = <TokenType.COLUMN_DEF: 'COLUMN_DEF'>
SCHEMA = <TokenType.SCHEMA: 'SCHEMA'>
TABLE = <TokenType.TABLE: 'TABLE'>
WAREHOUSE = <TokenType.WAREHOUSE: 'WAREHOUSE'>
STAGE = <TokenType.STAGE: 'STAGE'>
STREAMLIT = <TokenType.STREAMLIT: 'STREAMLIT'>
VAR = <TokenType.VAR: 'VAR'>
BIT_STRING = <TokenType.BIT_STRING: 'BIT_STRING'>
HEX_STRING = <TokenType.HEX_STRING: 'HEX_STRING'>
BYTE_STRING = <TokenType.BYTE_STRING: 'BYTE_STRING'>
NATIONAL_STRING = <TokenType.NATIONAL_STRING: 'NATIONAL_STRING'>
RAW_STRING = <TokenType.RAW_STRING: 'RAW_STRING'>
HEREDOC_STRING = <TokenType.HEREDOC_STRING: 'HEREDOC_STRING'>
UNICODE_STRING = <TokenType.UNICODE_STRING: 'UNICODE_STRING'>
BIT = <TokenType.BIT: 'BIT'>
BOOLEAN = <TokenType.BOOLEAN: 'BOOLEAN'>
TINYINT = <TokenType.TINYINT: 'TINYINT'>
UTINYINT = <TokenType.UTINYINT: 'UTINYINT'>
SMALLINT = <TokenType.SMALLINT: 'SMALLINT'>
USMALLINT = <TokenType.USMALLINT: 'USMALLINT'>
MEDIUMINT = <TokenType.MEDIUMINT: 'MEDIUMINT'>
UMEDIUMINT = <TokenType.UMEDIUMINT: 'UMEDIUMINT'>
INT = <TokenType.INT: 'INT'>
UINT = <TokenType.UINT: 'UINT'>
BIGINT = <TokenType.BIGINT: 'BIGINT'>
UBIGINT = <TokenType.UBIGINT: 'UBIGINT'>
BIGNUM = <TokenType.BIGNUM: 'BIGNUM'>
INT128 = <TokenType.INT128: 'INT128'>
UINT128 = <TokenType.UINT128: 'UINT128'>
INT256 = <TokenType.INT256: 'INT256'>
UINT256 = <TokenType.UINT256: 'UINT256'>
FLOAT = <TokenType.FLOAT: 'FLOAT'>
DOUBLE = <TokenType.DOUBLE: 'DOUBLE'>
UDOUBLE = <TokenType.UDOUBLE: 'UDOUBLE'>
DECIMAL = <TokenType.DECIMAL: 'DECIMAL'>
DECIMAL32 = <TokenType.DECIMAL32: 'DECIMAL32'>
DECIMAL64 = <TokenType.DECIMAL64: 'DECIMAL64'>
DECIMAL128 = <TokenType.DECIMAL128: 'DECIMAL128'>
DECIMAL256 = <TokenType.DECIMAL256: 'DECIMAL256'>
DECFLOAT = <TokenType.DECFLOAT: 'DECFLOAT'>
UDECIMAL = <TokenType.UDECIMAL: 'UDECIMAL'>
BIGDECIMAL = <TokenType.BIGDECIMAL: 'BIGDECIMAL'>
CHAR = <TokenType.CHAR: 'CHAR'>
NCHAR = <TokenType.NCHAR: 'NCHAR'>
VARCHAR = <TokenType.VARCHAR: 'VARCHAR'>
NVARCHAR = <TokenType.NVARCHAR: 'NVARCHAR'>
BPCHAR = <TokenType.BPCHAR: 'BPCHAR'>
TEXT = <TokenType.TEXT: 'TEXT'>
MEDIUMTEXT = <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>
LONGTEXT = <TokenType.LONGTEXT: 'LONGTEXT'>
BLOB = <TokenType.BLOB: 'BLOB'>
MEDIUMBLOB = <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>
LONGBLOB = <TokenType.LONGBLOB: 'LONGBLOB'>
TINYBLOB = <TokenType.TINYBLOB: 'TINYBLOB'>
TINYTEXT = <TokenType.TINYTEXT: 'TINYTEXT'>
NAME = <TokenType.NAME: 'NAME'>
BINARY = <TokenType.BINARY: 'BINARY'>
VARBINARY = <TokenType.VARBINARY: 'VARBINARY'>
JSON = <TokenType.JSON: 'JSON'>
JSONB = <TokenType.JSONB: 'JSONB'>
TIME = <TokenType.TIME: 'TIME'>
TIMETZ = <TokenType.TIMETZ: 'TIMETZ'>
TIME_NS = <TokenType.TIME_NS: 'TIME_NS'>
TIMESTAMP = <TokenType.TIMESTAMP: 'TIMESTAMP'>
TIMESTAMPTZ = <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>
TIMESTAMPLTZ = <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>
TIMESTAMPNTZ = <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>
TIMESTAMP_S = <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>
TIMESTAMP_MS = <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>
TIMESTAMP_NS = <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>
DATETIME = <TokenType.DATETIME: 'DATETIME'>
DATETIME2 = <TokenType.DATETIME2: 'DATETIME2'>
DATETIME64 = <TokenType.DATETIME64: 'DATETIME64'>
SMALLDATETIME = <TokenType.SMALLDATETIME: 'SMALLDATETIME'>
DATE = <TokenType.DATE: 'DATE'>
DATE32 = <TokenType.DATE32: 'DATE32'>
INT4RANGE = <TokenType.INT4RANGE: 'INT4RANGE'>
INT4MULTIRANGE = <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>
INT8RANGE = <TokenType.INT8RANGE: 'INT8RANGE'>
INT8MULTIRANGE = <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>
NUMRANGE = <TokenType.NUMRANGE: 'NUMRANGE'>
NUMMULTIRANGE = <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>
TSRANGE = <TokenType.TSRANGE: 'TSRANGE'>
TSMULTIRANGE = <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>
TSTZRANGE = <TokenType.TSTZRANGE: 'TSTZRANGE'>
TSTZMULTIRANGE = <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>
DATERANGE = <TokenType.DATERANGE: 'DATERANGE'>
DATEMULTIRANGE = <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>
UUID = <TokenType.UUID: 'UUID'>
GEOGRAPHY = <TokenType.GEOGRAPHY: 'GEOGRAPHY'>
GEOGRAPHYPOINT = <TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>
NULLABLE = <TokenType.NULLABLE: 'NULLABLE'>
GEOMETRY = <TokenType.GEOMETRY: 'GEOMETRY'>
POINT = <TokenType.POINT: 'POINT'>
RING = <TokenType.RING: 'RING'>
LINESTRING = <TokenType.LINESTRING: 'LINESTRING'>
LOCALTIME = <TokenType.LOCALTIME: 'LOCALTIME'>
LOCALTIMESTAMP = <TokenType.LOCALTIMESTAMP: 'LOCALTIMESTAMP'>
SYSTIMESTAMP = <TokenType.SYSTIMESTAMP: 'SYSTIMESTAMP'>
MULTILINESTRING = <TokenType.MULTILINESTRING: 'MULTILINESTRING'>
POLYGON = <TokenType.POLYGON: 'POLYGON'>
MULTIPOLYGON = <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>
HLLSKETCH = <TokenType.HLLSKETCH: 'HLLSKETCH'>
HSTORE = <TokenType.HSTORE: 'HSTORE'>
SUPER = <TokenType.SUPER: 'SUPER'>
SERIAL = <TokenType.SERIAL: 'SERIAL'>
SMALLSERIAL = <TokenType.SMALLSERIAL: 'SMALLSERIAL'>
BIGSERIAL = <TokenType.BIGSERIAL: 'BIGSERIAL'>
XML = <TokenType.XML: 'XML'>
YEAR = <TokenType.YEAR: 'YEAR'>
USERDEFINED = <TokenType.USERDEFINED: 'USERDEFINED'>
MONEY = <TokenType.MONEY: 'MONEY'>
SMALLMONEY = <TokenType.SMALLMONEY: 'SMALLMONEY'>
ROWVERSION = <TokenType.ROWVERSION: 'ROWVERSION'>
IMAGE = <TokenType.IMAGE: 'IMAGE'>
VARIANT = <TokenType.VARIANT: 'VARIANT'>
OBJECT = <TokenType.OBJECT: 'OBJECT'>
INET = <TokenType.INET: 'INET'>
IPADDRESS = <TokenType.IPADDRESS: 'IPADDRESS'>
IPPREFIX = <TokenType.IPPREFIX: 'IPPREFIX'>
IPV4 = <TokenType.IPV4: 'IPV4'>
IPV6 = <TokenType.IPV6: 'IPV6'>
ENUM = <TokenType.ENUM: 'ENUM'>
ENUM8 = <TokenType.ENUM8: 'ENUM8'>
ENUM16 = <TokenType.ENUM16: 'ENUM16'>
FIXEDSTRING = <TokenType.FIXEDSTRING: 'FIXEDSTRING'>
LOWCARDINALITY = <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>
NESTED = <TokenType.NESTED: 'NESTED'>
AGGREGATEFUNCTION = <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>
SIMPLEAGGREGATEFUNCTION = <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>
TDIGEST = <TokenType.TDIGEST: 'TDIGEST'>
UNKNOWN = <TokenType.UNKNOWN: 'UNKNOWN'>
VECTOR = <TokenType.VECTOR: 'VECTOR'>
DYNAMIC = <TokenType.DYNAMIC: 'DYNAMIC'>
VOID = <TokenType.VOID: 'VOID'>
ALIAS = <TokenType.ALIAS: 'ALIAS'>
ALTER = <TokenType.ALTER: 'ALTER'>
ALL = <TokenType.ALL: 'ALL'>
ANTI = <TokenType.ANTI: 'ANTI'>
ANY = <TokenType.ANY: 'ANY'>
APPLY = <TokenType.APPLY: 'APPLY'>
ARRAY = <TokenType.ARRAY: 'ARRAY'>
ASC = <TokenType.ASC: 'ASC'>
ASOF = <TokenType.ASOF: 'ASOF'>
ATTACH = <TokenType.ATTACH: 'ATTACH'>
AUTO_INCREMENT = <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>
BEGIN = <TokenType.BEGIN: 'BEGIN'>
BETWEEN = <TokenType.BETWEEN: 'BETWEEN'>
BULK_COLLECT_INTO = <TokenType.BULK_COLLECT_INTO: 'BULK_COLLECT_INTO'>
CACHE = <TokenType.CACHE: 'CACHE'>
CASE = <TokenType.CASE: 'CASE'>
CHARACTER_SET = <TokenType.CHARACTER_SET: 'CHARACTER_SET'>
CLUSTER_BY = <TokenType.CLUSTER_BY: 'CLUSTER_BY'>
COLLATE = <TokenType.COLLATE: 'COLLATE'>
COMMAND = <TokenType.COMMAND: 'COMMAND'>
COMMENT = <TokenType.COMMENT: 'COMMENT'>
COMMIT = <TokenType.COMMIT: 'COMMIT'>
CONNECT_BY = <TokenType.CONNECT_BY: 'CONNECT_BY'>
CONSTRAINT = <TokenType.CONSTRAINT: 'CONSTRAINT'>
COPY = <TokenType.COPY: 'COPY'>
CREATE = <TokenType.CREATE: 'CREATE'>
CROSS = <TokenType.CROSS: 'CROSS'>
CUBE = <TokenType.CUBE: 'CUBE'>
CURRENT_DATE = <TokenType.CURRENT_DATE: 'CURRENT_DATE'>
CURRENT_DATETIME = <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>
CURRENT_SCHEMA = <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>
CURRENT_TIME = <TokenType.CURRENT_TIME: 'CURRENT_TIME'>
CURRENT_TIMESTAMP = <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>
CURRENT_USER = <TokenType.CURRENT_USER: 'CURRENT_USER'>
CURRENT_ROLE = <TokenType.CURRENT_ROLE: 'CURRENT_ROLE'>
CURRENT_CATALOG = <TokenType.CURRENT_CATALOG: 'CURRENT_CATALOG'>
DECLARE = <TokenType.DECLARE: 'DECLARE'>
DEFAULT = <TokenType.DEFAULT: 'DEFAULT'>
DELETE = <TokenType.DELETE: 'DELETE'>
DESC = <TokenType.DESC: 'DESC'>
DESCRIBE = <TokenType.DESCRIBE: 'DESCRIBE'>
DETACH = <TokenType.DETACH: 'DETACH'>
DICTIONARY = <TokenType.DICTIONARY: 'DICTIONARY'>
DISTINCT = <TokenType.DISTINCT: 'DISTINCT'>
DISTRIBUTE_BY = <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>
DIV = <TokenType.DIV: 'DIV'>
DROP = <TokenType.DROP: 'DROP'>
ELSE = <TokenType.ELSE: 'ELSE'>
END = <TokenType.END: 'END'>
ESCAPE = <TokenType.ESCAPE: 'ESCAPE'>
EXCEPT = <TokenType.EXCEPT: 'EXCEPT'>
EXECUTE = <TokenType.EXECUTE: 'EXECUTE'>
EXISTS = <TokenType.EXISTS: 'EXISTS'>
FALSE = <TokenType.FALSE: 'FALSE'>
FETCH = <TokenType.FETCH: 'FETCH'>
FILE = <TokenType.FILE: 'FILE'>
FILE_FORMAT = <TokenType.FILE_FORMAT: 'FILE_FORMAT'>
FILTER = <TokenType.FILTER: 'FILTER'>
FINAL = <TokenType.FINAL: 'FINAL'>
FIRST = <TokenType.FIRST: 'FIRST'>
FOR = <TokenType.FOR: 'FOR'>
FORCE = <TokenType.FORCE: 'FORCE'>
FOREIGN_KEY = <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>
FORMAT = <TokenType.FORMAT: 'FORMAT'>
FROM = <TokenType.FROM: 'FROM'>
FULL = <TokenType.FULL: 'FULL'>
FUNCTION = <TokenType.FUNCTION: 'FUNCTION'>
GET = <TokenType.GET: 'GET'>
GLOB = <TokenType.GLOB: 'GLOB'>
GLOBAL = <TokenType.GLOBAL: 'GLOBAL'>
GRANT = <TokenType.GRANT: 'GRANT'>
GROUP_BY = <TokenType.GROUP_BY: 'GROUP_BY'>
GROUPING_SETS = <TokenType.GROUPING_SETS: 'GROUPING_SETS'>
HAVING = <TokenType.HAVING: 'HAVING'>
HINT = <TokenType.HINT: 'HINT'>
IGNORE = <TokenType.IGNORE: 'IGNORE'>
ILIKE = <TokenType.ILIKE: 'ILIKE'>
IN = <TokenType.IN: 'IN'>
INDEX = <TokenType.INDEX: 'INDEX'>
INDEXED_BY = <TokenType.INDEXED_BY: 'INDEXED_BY'>
INNER = <TokenType.INNER: 'INNER'>
INSERT = <TokenType.INSERT: 'INSERT'>
INSTALL = <TokenType.INSTALL: 'INSTALL'>
INTERSECT = <TokenType.INTERSECT: 'INTERSECT'>
INTERVAL = <TokenType.INTERVAL: 'INTERVAL'>
INTO = <TokenType.INTO: 'INTO'>
INTRODUCER = <TokenType.INTRODUCER: 'INTRODUCER'>
IRLIKE = <TokenType.IRLIKE: 'IRLIKE'>
IS = <TokenType.IS: 'IS'>
ISNULL = <TokenType.ISNULL: 'ISNULL'>
JOIN = <TokenType.JOIN: 'JOIN'>
JOIN_MARKER = <TokenType.JOIN_MARKER: 'JOIN_MARKER'>
KEEP = <TokenType.KEEP: 'KEEP'>
KEY = <TokenType.KEY: 'KEY'>
KILL = <TokenType.KILL: 'KILL'>
LANGUAGE = <TokenType.LANGUAGE: 'LANGUAGE'>
LATERAL = <TokenType.LATERAL: 'LATERAL'>
LEFT = <TokenType.LEFT: 'LEFT'>
LIKE = <TokenType.LIKE: 'LIKE'>
LIMIT = <TokenType.LIMIT: 'LIMIT'>
LIST = <TokenType.LIST: 'LIST'>
LOAD = <TokenType.LOAD: 'LOAD'>
LOCK = <TokenType.LOCK: 'LOCK'>
MAP = <TokenType.MAP: 'MAP'>
MATCH = <TokenType.MATCH: 'MATCH'>
MATCH_CONDITION = <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>
MATCH_RECOGNIZE = <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>
MEMBER_OF = <TokenType.MEMBER_OF: 'MEMBER_OF'>
MERGE = <TokenType.MERGE: 'MERGE'>
MOD = <TokenType.MOD: 'MOD'>
MODEL = <TokenType.MODEL: 'MODEL'>
NATURAL = <TokenType.NATURAL: 'NATURAL'>
NEXT = <TokenType.NEXT: 'NEXT'>
NOTHING = <TokenType.NOTHING: 'NOTHING'>
NOTNULL = <TokenType.NOTNULL: 'NOTNULL'>
NULL = <TokenType.NULL: 'NULL'>
OBJECT_IDENTIFIER = <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>
OFFSET = <TokenType.OFFSET: 'OFFSET'>
ON = <TokenType.ON: 'ON'>
ONLY = <TokenType.ONLY: 'ONLY'>
OPERATOR = <TokenType.OPERATOR: 'OPERATOR'>
ORDER_BY = <TokenType.ORDER_BY: 'ORDER_BY'>
ORDER_SIBLINGS_BY = <TokenType.ORDER_SIBLINGS_BY: 'ORDER_SIBLINGS_BY'>
ORDERED = <TokenType.ORDERED: 'ORDERED'>
ORDINALITY = <TokenType.ORDINALITY: 'ORDINALITY'>
OUT = <TokenType.OUT: 'OUT'>
INOUT = <TokenType.INOUT: 'INOUT'>
OUTER = <TokenType.OUTER: 'OUTER'>
OVER = <TokenType.OVER: 'OVER'>
OVERLAPS = <TokenType.OVERLAPS: 'OVERLAPS'>
OVERWRITE = <TokenType.OVERWRITE: 'OVERWRITE'>
PARTITION = <TokenType.PARTITION: 'PARTITION'>
PARTITION_BY = <TokenType.PARTITION_BY: 'PARTITION_BY'>
PERCENT = <TokenType.PERCENT: 'PERCENT'>
PIVOT = <TokenType.PIVOT: 'PIVOT'>
PLACEHOLDER = <TokenType.PLACEHOLDER: 'PLACEHOLDER'>
POSITIONAL = <TokenType.POSITIONAL: 'POSITIONAL'>
PRAGMA = <TokenType.PRAGMA: 'PRAGMA'>
PREWHERE = <TokenType.PREWHERE: 'PREWHERE'>
PRIMARY_KEY = <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>
PROCEDURE = <TokenType.PROCEDURE: 'PROCEDURE'>
PROPERTIES = <TokenType.PROPERTIES: 'PROPERTIES'>
PSEUDO_TYPE = <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>
PUT = <TokenType.PUT: 'PUT'>
QUALIFY = <TokenType.QUALIFY: 'QUALIFY'>
QUOTE = <TokenType.QUOTE: 'QUOTE'>
QDCOLON = <TokenType.QDCOLON: 'QDCOLON'>
RANGE = <TokenType.RANGE: 'RANGE'>
RECURSIVE = <TokenType.RECURSIVE: 'RECURSIVE'>
REFRESH = <TokenType.REFRESH: 'REFRESH'>
RENAME = <TokenType.RENAME: 'RENAME'>
REPLACE = <TokenType.REPLACE: 'REPLACE'>
RETURNING = <TokenType.RETURNING: 'RETURNING'>
REVOKE = <TokenType.REVOKE: 'REVOKE'>
REFERENCES = <TokenType.REFERENCES: 'REFERENCES'>
RIGHT = <TokenType.RIGHT: 'RIGHT'>
RLIKE = <TokenType.RLIKE: 'RLIKE'>
ROLLBACK = <TokenType.ROLLBACK: 'ROLLBACK'>
ROLLUP = <TokenType.ROLLUP: 'ROLLUP'>
ROW = <TokenType.ROW: 'ROW'>
ROWS = <TokenType.ROWS: 'ROWS'>
SELECT = <TokenType.SELECT: 'SELECT'>
SEMI = <TokenType.SEMI: 'SEMI'>
SEPARATOR = <TokenType.SEPARATOR: 'SEPARATOR'>
SEQUENCE = <TokenType.SEQUENCE: 'SEQUENCE'>
SERDE_PROPERTIES = <TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>
SET = <TokenType.SET: 'SET'>
SETTINGS = <TokenType.SETTINGS: 'SETTINGS'>
SHOW = <TokenType.SHOW: 'SHOW'>
SIMILAR_TO = <TokenType.SIMILAR_TO: 'SIMILAR_TO'>
SOME = <TokenType.SOME: 'SOME'>
SORT_BY = <TokenType.SORT_BY: 'SORT_BY'>
SOUNDS_LIKE = <TokenType.SOUNDS_LIKE: 'SOUNDS_LIKE'>
START_WITH = <TokenType.START_WITH: 'START_WITH'>
STORAGE_INTEGRATION = <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>
STRAIGHT_JOIN = <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>
STRUCT = <TokenType.STRUCT: 'STRUCT'>
SUMMARIZE = <TokenType.SUMMARIZE: 'SUMMARIZE'>
TABLE_SAMPLE = <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>
TAG = <TokenType.TAG: 'TAG'>
TEMPORARY = <TokenType.TEMPORARY: 'TEMPORARY'>
TOP = <TokenType.TOP: 'TOP'>
THEN = <TokenType.THEN: 'THEN'>
TRUE = <TokenType.TRUE: 'TRUE'>
TRUNCATE = <TokenType.TRUNCATE: 'TRUNCATE'>
UNCACHE = <TokenType.UNCACHE: 'UNCACHE'>
UNION = <TokenType.UNION: 'UNION'>
UNNEST = <TokenType.UNNEST: 'UNNEST'>
UNPIVOT = <TokenType.UNPIVOT: 'UNPIVOT'>
UPDATE = <TokenType.UPDATE: 'UPDATE'>
USE = <TokenType.USE: 'USE'>
USING = <TokenType.USING: 'USING'>
VALUES = <TokenType.VALUES: 'VALUES'>
VARIADIC = <TokenType.VARIADIC: 'VARIADIC'>
VIEW = <TokenType.VIEW: 'VIEW'>
SEMANTIC_VIEW = <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>
VOLATILE = <TokenType.VOLATILE: 'VOLATILE'>
WHEN = <TokenType.WHEN: 'WHEN'>
WHERE = <TokenType.WHERE: 'WHERE'>
WINDOW = <TokenType.WINDOW: 'WINDOW'>
WITH = <TokenType.WITH: 'WITH'>
UNIQUE = <TokenType.UNIQUE: 'UNIQUE'>
UTC_DATE = <TokenType.UTC_DATE: 'UTC_DATE'>
UTC_TIME = <TokenType.UTC_TIME: 'UTC_TIME'>
UTC_TIMESTAMP = <TokenType.UTC_TIMESTAMP: 'UTC_TIMESTAMP'>
VERSION_SNAPSHOT = <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>
TIMESTAMP_SNAPSHOT = <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>
OPTION = <TokenType.OPTION: 'OPTION'>
SINK = <TokenType.SINK: 'SINK'>
SOURCE = <TokenType.SOURCE: 'SOURCE'>
ANALYZE = <TokenType.ANALYZE: 'ANALYZE'>
NAMESPACE = <TokenType.NAMESPACE: 'NAMESPACE'>
EXPORT = <TokenType.EXPORT: 'EXPORT'>
HIVE_TOKEN_STREAM = <TokenType.HIVE_TOKEN_STREAM: 'HIVE_TOKEN_STREAM'>
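
Because TokenType derives from AutoName, each member's value is simply its own name (as the listing above shows), so converting between strings and members round-trips cleanly. A short sketch:

from sqlglot.tokens import TokenType

# AutoName assigns each member a value equal to its name.
assert TokenType.SELECT.value == "SELECT"

# Standard Enum lookup by name therefore recovers the member.
assert TokenType["SELECT"] is TokenType.SELECT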
class Token:
473class Token:
474    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
475
476    @classmethod
477    def number(cls, number: int) -> Token:
478        """Returns a NUMBER token with `number` as its text."""
479        return cls(TokenType.NUMBER, str(number))
480
481    @classmethod
482    def string(cls, string: str) -> Token:
483        """Returns a STRING token with `string` as its text."""
484        return cls(TokenType.STRING, string)
485
486    @classmethod
487    def identifier(cls, identifier: str) -> Token:
488        """Returns an IDENTIFIER token with `identifier` as its text."""
489        return cls(TokenType.IDENTIFIER, identifier)
490
491    @classmethod
492    def var(cls, var: str) -> Token:
493        """Returns a VAR token with `var` as its text."""
494        return cls(TokenType.VAR, var)
495
496    def __init__(
497        self,
498        token_type: TokenType,
499        text: str,
500        line: int = 1,
501        col: int = 1,
502        start: int = 0,
503        end: int = 0,
504        comments: t.Optional[t.List[str]] = None,
505    ) -> None:
506        """Token initializer.
507
508        Args:
509            token_type: The TokenType Enum.
510            text: The text of the token.
511            line: The line that the token ends on.
512            col: The column that the token ends on.
513            start: The start index of the token.
514            end: The ending index of the token.
515            comments: The comments to attach to the token.
516        """
517        self.token_type = token_type
518        self.text = text
519        self.line = line
520        self.col = col
521        self.start = start
522        self.end = end
523        self.comments = [] if comments is None else comments
524
525    def __repr__(self) -> str:
526        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
527        return f"<Token {attributes}>"
Token(token_type: TokenType, text: str, line: int = 1, col: int = 1, start: int = 0, end: int = 0, comments: Optional[List[str]] = None)
496    def __init__(
497        self,
498        token_type: TokenType,
499        text: str,
500        line: int = 1,
501        col: int = 1,
502        start: int = 0,
503        end: int = 0,
504        comments: t.Optional[t.List[str]] = None,
505    ) -> None:
506        """Token initializer.
507
508        Args:
509            token_type: The TokenType Enum.
510            text: The text of the token.
511            line: The line that the token ends on.
512            col: The column that the token ends on.
513            start: The start index of the token.
514            end: The ending index of the token.
515            comments: The comments to attach to the token.
516        """
517        self.token_type = token_type
518        self.text = text
519        self.line = line
520        self.col = col
521        self.start = start
522        self.end = end
523        self.comments = [] if comments is None else comments

Token initializer.

Arguments:
  • token_type: The TokenType Enum.
  • text: The text of the token.
  • line: The line that the token ends on.
  • col: The column that the token ends on.
  • start: The start index of the token.
  • end: The ending index of the token.
  • comments: The comments to attach to the token.
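
For illustration, a Token can also be constructed directly; the positions below are made-up values, chosen as if the token were the literal 42 ending at column 9 of line 1. Note that start and end are character offsets into the SQL string, while line and col record where the token ends.

from sqlglot.tokens import Token, TokenType

# start/end are 0-based character offsets; line/col point at the token's end.
token = Token(TokenType.NUMBER, "42", line=1, col=9, start=7, end=8)

# __repr__ lists every slot, e.g. <Token token_type: TokenType.NUMBER, text: 42, ...>
print(token)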
@classmethod
def number(cls, number: int) -> Token:
476    @classmethod
477    def number(cls, number: int) -> Token:
478        """Returns a NUMBER token with `number` as its text."""
479        return cls(TokenType.NUMBER, str(number))

Returns a NUMBER token with number as its text.

@classmethod
def string(cls, string: str) -> Token:
481    @classmethod
482    def string(cls, string: str) -> Token:
483        """Returns a STRING token with `string` as its text."""
484        return cls(TokenType.STRING, string)

Returns a STRING token with string as its text.

@classmethod
def identifier(cls, identifier: str) -> Token:
486    @classmethod
487    def identifier(cls, identifier: str) -> Token:
488        """Returns an IDENTIFIER token with `identifier` as its text."""
489        return cls(TokenType.IDENTIFIER, identifier)

Returns an IDENTIFIER token with identifier as its text.

@classmethod
def var(cls, var: str) -> Token:
491    @classmethod
492    def var(cls, var: str) -> Token:
493        """Returns a VAR token with `var` as its text."""
494        return cls(TokenType.VAR, var)

Returns a VAR token with var as its text.
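
The four classmethods above are convenience constructors: they only set token_type and text, leaving the position metadata at its defaults (line=1, col=1, start=0, end=0). A brief sketch:

from sqlglot.tokens import Token, TokenType

assert Token.number(42).text == "42"
assert Token.string("hello").token_type == TokenType.STRING
assert Token.identifier("my_table").token_type == TokenType.IDENTIFIER

# Position metadata is left at the defaults.
assert Token.var("x").start == 0 and Token.var("x").line == 1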

token_type
text
line
col
start
end
comments
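
One behavior of the Tokenizer documented below that is easy to miss: multi-word keywords such as GROUP BY are matched through a keyword trie (see _scan_keywords), and runs of whitespace between the words are collapsed during matching. A short sketch with an illustrative query:

from sqlglot.tokens import Tokenizer, TokenType

# Extra spaces inside a multi-word keyword still yield a single GROUP_BY token.
tokens = Tokenizer().tokenize("SELECT a FROM t GROUP   BY a")
assert any(t.token_type == TokenType.GROUP_BY for t in tokens)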
class Tokenizer:
 645class Tokenizer(metaclass=_Tokenizer):
 646    SINGLE_TOKENS = {
 647        "(": TokenType.L_PAREN,
 648        ")": TokenType.R_PAREN,
 649        "[": TokenType.L_BRACKET,
 650        "]": TokenType.R_BRACKET,
 651        "{": TokenType.L_BRACE,
 652        "}": TokenType.R_BRACE,
 653        "&": TokenType.AMP,
 654        "^": TokenType.CARET,
 655        ":": TokenType.COLON,
 656        ",": TokenType.COMMA,
 657        ".": TokenType.DOT,
 658        "-": TokenType.DASH,
 659        "=": TokenType.EQ,
 660        ">": TokenType.GT,
 661        "<": TokenType.LT,
 662        "%": TokenType.MOD,
 663        "!": TokenType.NOT,
 664        "|": TokenType.PIPE,
 665        "+": TokenType.PLUS,
 666        ";": TokenType.SEMICOLON,
 667        "/": TokenType.SLASH,
 668        "\\": TokenType.BACKSLASH,
 669        "*": TokenType.STAR,
 670        "~": TokenType.TILDE,
 671        "?": TokenType.PLACEHOLDER,
 672        "@": TokenType.PARAMETER,
 673        "#": TokenType.HASH,
 674        # Used for breaking a var like x'y', but nothing else; the token type doesn't matter
 675        "'": TokenType.UNKNOWN,
 676        "`": TokenType.UNKNOWN,
 677        '"': TokenType.UNKNOWN,
 678    }
 679
 680    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 681    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 682    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 683    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
 684    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
 685    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 686    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 687    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 688    STRING_ESCAPES = ["'"]
 689    BYTE_STRING_ESCAPES: t.List[str] = []
 690    VAR_SINGLE_TOKENS: t.Set[str] = set()
 691    ESCAPE_FOLLOW_CHARS: t.List[str] = []
 692
 693    # The strings in this list can always be used as escapes, regardless of the surrounding
 694    # identifier delimiters. By default, the closing delimiter is assumed to also act as an
 695    # identifier escape, e.g. if we use double-quotes, then they also act as escapes: "x"""
 696    IDENTIFIER_ESCAPES: t.List[str] = []
 697
 698    # Whether the heredoc tags follow the same lexical rules as unquoted identifiers
 699    HEREDOC_TAG_IS_IDENTIFIER = False
 700
 701    # Token that we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc
 702    HEREDOC_STRING_ALTERNATIVE = TokenType.VAR
 703
 704    # Whether string escape characters function as such when placed within raw strings
 705    STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS = True
 706
 707    NESTED_COMMENTS = True
 708
 709    HINT_START = "/*+"
 710
 711    TOKENS_PRECEDING_HINT = {TokenType.SELECT, TokenType.INSERT, TokenType.UPDATE, TokenType.DELETE}
 712
 713    # Autofilled
 714    _COMMENTS: t.Dict[str, str] = {}
 715    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
 716    _IDENTIFIERS: t.Dict[str, str] = {}
 717    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 718    _QUOTES: t.Dict[str, str] = {}
 719    _STRING_ESCAPES: t.Set[str] = set()
 720    _BYTE_STRING_ESCAPES: t.Set[str] = set()
 721    _KEYWORD_TRIE: t.Dict = {}
 722    _RS_TOKENIZER: t.Optional[t.Any] = None
 723    _ESCAPE_FOLLOW_CHARS: t.Set[str] = set()
 724
 725    KEYWORDS: t.Dict[str, TokenType] = {
 726        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 727        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 728        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
 729        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
 730        HINT_START: TokenType.HINT,
 731        "&<": TokenType.AMP_LT,
 732        "&>": TokenType.AMP_GT,
 733        "==": TokenType.EQ,
 734        "::": TokenType.DCOLON,
 735        "?::": TokenType.QDCOLON,
 736        "||": TokenType.DPIPE,
 737        "|>": TokenType.PIPE_GT,
 738        ">=": TokenType.GTE,
 739        "<=": TokenType.LTE,
 740        "<>": TokenType.NEQ,
 741        "!=": TokenType.NEQ,
 742        ":=": TokenType.COLON_EQ,
 743        "<=>": TokenType.NULLSAFE_EQ,
 744        "->": TokenType.ARROW,
 745        "->>": TokenType.DARROW,
 746        "=>": TokenType.FARROW,
 747        "#>": TokenType.HASH_ARROW,
 748        "#>>": TokenType.DHASH_ARROW,
 749        "<->": TokenType.LR_ARROW,
 750        "&&": TokenType.DAMP,
 751        "??": TokenType.DQMARK,
 752        "~~~": TokenType.GLOB,
 753        "~~": TokenType.LIKE,
 754        "~~*": TokenType.ILIKE,
 755        "~*": TokenType.IRLIKE,
 756        "-|-": TokenType.ADJACENT,
 757        "ALL": TokenType.ALL,
 758        "AND": TokenType.AND,
 759        "ANTI": TokenType.ANTI,
 760        "ANY": TokenType.ANY,
 761        "ASC": TokenType.ASC,
 762        "AS": TokenType.ALIAS,
 763        "ASOF": TokenType.ASOF,
 764        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 765        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 766        "BEGIN": TokenType.BEGIN,
 767        "BETWEEN": TokenType.BETWEEN,
 768        "CACHE": TokenType.CACHE,
 769        "UNCACHE": TokenType.UNCACHE,
 770        "CASE": TokenType.CASE,
 771        "CHARACTER SET": TokenType.CHARACTER_SET,
 772        "CLUSTER BY": TokenType.CLUSTER_BY,
 773        "COLLATE": TokenType.COLLATE,
 774        "COLUMN": TokenType.COLUMN,
 775        "COMMIT": TokenType.COMMIT,
 776        "CONNECT BY": TokenType.CONNECT_BY,
 777        "CONSTRAINT": TokenType.CONSTRAINT,
 778        "COPY": TokenType.COPY,
 779        "CREATE": TokenType.CREATE,
 780        "CROSS": TokenType.CROSS,
 781        "CUBE": TokenType.CUBE,
 782        "CURRENT_DATE": TokenType.CURRENT_DATE,
 783        "CURRENT_SCHEMA": TokenType.CURRENT_SCHEMA,
 784        "CURRENT_TIME": TokenType.CURRENT_TIME,
 785        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 786        "CURRENT_USER": TokenType.CURRENT_USER,
 787        "CURRENT_CATALOG": TokenType.CURRENT_CATALOG,
 788        "DATABASE": TokenType.DATABASE,
 789        "DEFAULT": TokenType.DEFAULT,
 790        "DELETE": TokenType.DELETE,
 791        "DESC": TokenType.DESC,
 792        "DESCRIBE": TokenType.DESCRIBE,
 793        "DISTINCT": TokenType.DISTINCT,
 794        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 795        "DIV": TokenType.DIV,
 796        "DROP": TokenType.DROP,
 797        "ELSE": TokenType.ELSE,
 798        "END": TokenType.END,
 799        "ENUM": TokenType.ENUM,
 800        "ESCAPE": TokenType.ESCAPE,
 801        "EXCEPT": TokenType.EXCEPT,
 802        "EXECUTE": TokenType.EXECUTE,
 803        "EXISTS": TokenType.EXISTS,
 804        "FALSE": TokenType.FALSE,
 805        "FETCH": TokenType.FETCH,
 806        "FILTER": TokenType.FILTER,
 807        "FILE": TokenType.FILE,
 808        "FIRST": TokenType.FIRST,
 809        "FULL": TokenType.FULL,
 810        "FUNCTION": TokenType.FUNCTION,
 811        "FOR": TokenType.FOR,
 812        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 813        "FORMAT": TokenType.FORMAT,
 814        "FROM": TokenType.FROM,
 815        "GEOGRAPHY": TokenType.GEOGRAPHY,
 816        "GEOMETRY": TokenType.GEOMETRY,
 817        "GLOB": TokenType.GLOB,
 818        "GROUP BY": TokenType.GROUP_BY,
 819        "GROUPING SETS": TokenType.GROUPING_SETS,
 820        "HAVING": TokenType.HAVING,
 821        "ILIKE": TokenType.ILIKE,
 822        "IN": TokenType.IN,
 823        "INDEX": TokenType.INDEX,
 824        "INET": TokenType.INET,
 825        "INNER": TokenType.INNER,
 826        "INSERT": TokenType.INSERT,
 827        "INTERVAL": TokenType.INTERVAL,
 828        "INTERSECT": TokenType.INTERSECT,
 829        "INTO": TokenType.INTO,
 830        "IS": TokenType.IS,
 831        "ISNULL": TokenType.ISNULL,
 832        "JOIN": TokenType.JOIN,
 833        "KEEP": TokenType.KEEP,
 834        "KILL": TokenType.KILL,
 835        "LATERAL": TokenType.LATERAL,
 836        "LEFT": TokenType.LEFT,
 837        "LIKE": TokenType.LIKE,
 838        "LIMIT": TokenType.LIMIT,
 839        "LOAD": TokenType.LOAD,
 840        "LOCALTIME": TokenType.LOCALTIME,
 841        "LOCALTIMESTAMP": TokenType.LOCALTIMESTAMP,
 842        "LOCK": TokenType.LOCK,
 843        "MERGE": TokenType.MERGE,
 844        "NAMESPACE": TokenType.NAMESPACE,
 845        "NATURAL": TokenType.NATURAL,
 846        "NEXT": TokenType.NEXT,
 847        "NOT": TokenType.NOT,
 848        "NOTNULL": TokenType.NOTNULL,
 849        "NULL": TokenType.NULL,
 850        "OBJECT": TokenType.OBJECT,
 851        "OFFSET": TokenType.OFFSET,
 852        "ON": TokenType.ON,
 853        "OR": TokenType.OR,
 854        "XOR": TokenType.XOR,
 855        "ORDER BY": TokenType.ORDER_BY,
 856        "ORDINALITY": TokenType.ORDINALITY,
 857        "OUT": TokenType.OUT,
 858        "OUTER": TokenType.OUTER,
 859        "OVER": TokenType.OVER,
 860        "OVERLAPS": TokenType.OVERLAPS,
 861        "OVERWRITE": TokenType.OVERWRITE,
 862        "PARTITION": TokenType.PARTITION,
 863        "PARTITION BY": TokenType.PARTITION_BY,
 864        "PARTITIONED BY": TokenType.PARTITION_BY,
 865        "PARTITIONED_BY": TokenType.PARTITION_BY,
 866        "PERCENT": TokenType.PERCENT,
 867        "PIVOT": TokenType.PIVOT,
 868        "PRAGMA": TokenType.PRAGMA,
 869        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 870        "PROCEDURE": TokenType.PROCEDURE,
 871        "OPERATOR": TokenType.OPERATOR,
 872        "QUALIFY": TokenType.QUALIFY,
 873        "RANGE": TokenType.RANGE,
 874        "RECURSIVE": TokenType.RECURSIVE,
 875        "REGEXP": TokenType.RLIKE,
 876        "RENAME": TokenType.RENAME,
 877        "REPLACE": TokenType.REPLACE,
 878        "RETURNING": TokenType.RETURNING,
 879        "REFERENCES": TokenType.REFERENCES,
 880        "RIGHT": TokenType.RIGHT,
 881        "RLIKE": TokenType.RLIKE,
 882        "ROLLBACK": TokenType.ROLLBACK,
 883        "ROLLUP": TokenType.ROLLUP,
 884        "ROW": TokenType.ROW,
 885        "ROWS": TokenType.ROWS,
 886        "SCHEMA": TokenType.SCHEMA,
 887        "SELECT": TokenType.SELECT,
 888        "SEMI": TokenType.SEMI,
 889        "SESSION": TokenType.SESSION,
 890        "SESSION_USER": TokenType.SESSION_USER,
 891        "SET": TokenType.SET,
 892        "SETTINGS": TokenType.SETTINGS,
 893        "SHOW": TokenType.SHOW,
 894        "SIMILAR TO": TokenType.SIMILAR_TO,
 895        "SOME": TokenType.SOME,
 896        "SORT BY": TokenType.SORT_BY,
 897        "START WITH": TokenType.START_WITH,
 898        "STRAIGHT_JOIN": TokenType.STRAIGHT_JOIN,
 899        "TABLE": TokenType.TABLE,
 900        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 901        "TEMP": TokenType.TEMPORARY,
 902        "TEMPORARY": TokenType.TEMPORARY,
 903        "THEN": TokenType.THEN,
 904        "TRUE": TokenType.TRUE,
 905        "TRUNCATE": TokenType.TRUNCATE,
 906        "UNION": TokenType.UNION,
 907        "UNKNOWN": TokenType.UNKNOWN,
 908        "UNNEST": TokenType.UNNEST,
 909        "UNPIVOT": TokenType.UNPIVOT,
 910        "UPDATE": TokenType.UPDATE,
 911        "USE": TokenType.USE,
 912        "USING": TokenType.USING,
 913        "UUID": TokenType.UUID,
 914        "VALUES": TokenType.VALUES,
 915        "VIEW": TokenType.VIEW,
 916        "VOLATILE": TokenType.VOLATILE,
 917        "WHEN": TokenType.WHEN,
 918        "WHERE": TokenType.WHERE,
 919        "WINDOW": TokenType.WINDOW,
 920        "WITH": TokenType.WITH,
 921        "APPLY": TokenType.APPLY,
 922        "ARRAY": TokenType.ARRAY,
 923        "BIT": TokenType.BIT,
 924        "BOOL": TokenType.BOOLEAN,
 925        "BOOLEAN": TokenType.BOOLEAN,
 926        "BYTE": TokenType.TINYINT,
 927        "MEDIUMINT": TokenType.MEDIUMINT,
 928        "INT1": TokenType.TINYINT,
 929        "TINYINT": TokenType.TINYINT,
 930        "INT16": TokenType.SMALLINT,
 931        "SHORT": TokenType.SMALLINT,
 932        "SMALLINT": TokenType.SMALLINT,
 933        "HUGEINT": TokenType.INT128,
 934        "UHUGEINT": TokenType.UINT128,
 935        "INT2": TokenType.SMALLINT,
 936        "INTEGER": TokenType.INT,
 937        "INT": TokenType.INT,
 938        "INT4": TokenType.INT,
 939        "INT32": TokenType.INT,
 940        "INT64": TokenType.BIGINT,
 941        "INT128": TokenType.INT128,
 942        "INT256": TokenType.INT256,
 943        "LONG": TokenType.BIGINT,
 944        "BIGINT": TokenType.BIGINT,
 945        "INT8": TokenType.TINYINT,
 946        "UINT": TokenType.UINT,
 947        "UINT128": TokenType.UINT128,
 948        "UINT256": TokenType.UINT256,
 949        "DEC": TokenType.DECIMAL,
 950        "DECIMAL": TokenType.DECIMAL,
 951        "DECIMAL32": TokenType.DECIMAL32,
 952        "DECIMAL64": TokenType.DECIMAL64,
 953        "DECIMAL128": TokenType.DECIMAL128,
 954        "DECIMAL256": TokenType.DECIMAL256,
 955        "DECFLOAT": TokenType.DECFLOAT,
 956        "BIGDECIMAL": TokenType.BIGDECIMAL,
 957        "BIGNUMERIC": TokenType.BIGDECIMAL,
 958        "BIGNUM": TokenType.BIGNUM,
 959        "LIST": TokenType.LIST,
 960        "MAP": TokenType.MAP,
 961        "NULLABLE": TokenType.NULLABLE,
 962        "NUMBER": TokenType.DECIMAL,
 963        "NUMERIC": TokenType.DECIMAL,
 964        "FIXED": TokenType.DECIMAL,
 965        "REAL": TokenType.FLOAT,
 966        "FLOAT": TokenType.FLOAT,
 967        "FLOAT4": TokenType.FLOAT,
 968        "FLOAT8": TokenType.DOUBLE,
 969        "DOUBLE": TokenType.DOUBLE,
 970        "DOUBLE PRECISION": TokenType.DOUBLE,
 971        "JSON": TokenType.JSON,
 972        "JSONB": TokenType.JSONB,
 973        "CHAR": TokenType.CHAR,
 974        "CHARACTER": TokenType.CHAR,
 975        "CHAR VARYING": TokenType.VARCHAR,
 976        "CHARACTER VARYING": TokenType.VARCHAR,
 977        "NCHAR": TokenType.NCHAR,
 978        "VARCHAR": TokenType.VARCHAR,
 979        "VARCHAR2": TokenType.VARCHAR,
 980        "NVARCHAR": TokenType.NVARCHAR,
 981        "NVARCHAR2": TokenType.NVARCHAR,
 982        "BPCHAR": TokenType.BPCHAR,
 983        "STR": TokenType.TEXT,
 984        "STRING": TokenType.TEXT,
 985        "TEXT": TokenType.TEXT,
 986        "LONGTEXT": TokenType.LONGTEXT,
 987        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
 988        "TINYTEXT": TokenType.TINYTEXT,
 989        "CLOB": TokenType.TEXT,
 990        "LONGVARCHAR": TokenType.TEXT,
 991        "BINARY": TokenType.BINARY,
 992        "BLOB": TokenType.VARBINARY,
 993        "LONGBLOB": TokenType.LONGBLOB,
 994        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
 995        "TINYBLOB": TokenType.TINYBLOB,
 996        "BYTEA": TokenType.VARBINARY,
 997        "VARBINARY": TokenType.VARBINARY,
 998        "TIME": TokenType.TIME,
 999        "TIMETZ": TokenType.TIMETZ,
1000        "TIME_NS": TokenType.TIME_NS,
1001        "TIMESTAMP": TokenType.TIMESTAMP,
1002        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
1003        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
1004        "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
1005        "TIMESTAMPNTZ": TokenType.TIMESTAMPNTZ,
1006        "TIMESTAMP_NTZ": TokenType.TIMESTAMPNTZ,
1007        "DATE": TokenType.DATE,
1008        "DATETIME": TokenType.DATETIME,
1009        "INT4RANGE": TokenType.INT4RANGE,
1010        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
1011        "INT8RANGE": TokenType.INT8RANGE,
1012        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
1013        "NUMRANGE": TokenType.NUMRANGE,
1014        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
1015        "TSRANGE": TokenType.TSRANGE,
1016        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
1017        "TSTZRANGE": TokenType.TSTZRANGE,
1018        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
1019        "DATERANGE": TokenType.DATERANGE,
1020        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
1021        "UNIQUE": TokenType.UNIQUE,
1022        "VECTOR": TokenType.VECTOR,
1023        "STRUCT": TokenType.STRUCT,
1024        "SEQUENCE": TokenType.SEQUENCE,
1025        "VARIANT": TokenType.VARIANT,
1026        "ALTER": TokenType.ALTER,
1027        "ANALYZE": TokenType.ANALYZE,
1028        "CALL": TokenType.COMMAND,
1029        "COMMENT": TokenType.COMMENT,
1030        "EXPLAIN": TokenType.COMMAND,
1031        "GRANT": TokenType.GRANT,
1032        "REVOKE": TokenType.REVOKE,
1033        "OPTIMIZE": TokenType.COMMAND,
1034        "PREPARE": TokenType.COMMAND,
1035        "VACUUM": TokenType.COMMAND,
1036        "USER-DEFINED": TokenType.USERDEFINED,
1037        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
1038        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
1039    }
1040
1041    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
1042        " ": TokenType.SPACE,
1043        "\t": TokenType.SPACE,
1044        "\n": TokenType.BREAK,
1045        "\r": TokenType.BREAK,
1046    }
1047
1048    COMMANDS = {
1049        TokenType.COMMAND,
1050        TokenType.EXECUTE,
1051        TokenType.FETCH,
1052        TokenType.SHOW,
1053        TokenType.RENAME,
1054    }
1055
1056    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
1057
1058    # Handle numeric literals as in Hive (3L = BIGINT)
1059    NUMERIC_LITERALS: t.Dict[str, str] = {}
1060
1061    COMMENTS = ["--", ("/*", "*/")]
1062
1063    __slots__ = (
1064        "sql",
1065        "size",
1066        "tokens",
1067        "dialect",
1068        "use_rs_tokenizer",
1069        "_start",
1070        "_current",
1071        "_line",
1072        "_col",
1073        "_comments",
1074        "_char",
1075        "_end",
1076        "_peek",
1077        "_prev_token_line",
1078        "_rs_dialect_settings",
1079    )
1080
1081    def __init__(
1082        self,
1083        dialect: DialectType = None,
1084        use_rs_tokenizer: t.Optional[bool] = None,
1085        **opts: t.Any,
1086    ) -> None:
1087        from sqlglot.dialects import Dialect
1088
1089        self.dialect = Dialect.get_or_raise(dialect)
1090
1091        # Initialize `use_rs_tokenizer`, allowing it to be overridden per Tokenizer instance
1092        self.use_rs_tokenizer = (
1093            use_rs_tokenizer if use_rs_tokenizer is not None else USE_RS_TOKENIZER
1094        )
1095
1096        if self.use_rs_tokenizer:
1097            self._rs_dialect_settings = RsTokenizerDialectSettings(
1098                unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES,
1099                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
1100                numbers_can_be_underscore_separated=self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED,
1101            )
1102
1103        self.reset()
1104
1105    def reset(self) -> None:
1106        self.sql = ""
1107        self.size = 0
1108        self.tokens: t.List[Token] = []
1109        self._start = 0
1110        self._current = 0
1111        self._line = 1
1112        self._col = 0
1113        self._comments: t.List[str] = []
1114
1115        self._char = ""
1116        self._end = False
1117        self._peek = ""
1118        self._prev_token_line = -1
1119
1120    def tokenize(self, sql: str) -> t.List[Token]:
1121        """Returns a list of tokens corresponding to the SQL string `sql`."""
1122        if self.use_rs_tokenizer:
1123            return self.tokenize_rs(sql)
1124
1125        self.reset()
1126        self.sql = sql
1127        self.size = len(sql)
1128
1129        try:
1130            self._scan()
1131        except Exception as e:
1132            start = max(self._current - 50, 0)
1133            end = min(self._current + 50, self.size - 1)
1134            context = self.sql[start:end]
1135            raise TokenError(f"Error tokenizing '{context}'") from e
1136
1137        return self.tokens
1138
1139    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
1140        while self.size and not self._end:
1141            current = self._current
1142
1143            # Skip spaces here rather than iteratively calling advance() for performance reasons
1144            while current < self.size:
1145                char = self.sql[current]
1146
1147                if char.isspace() and (char == " " or char == "\t"):
1148                    current += 1
1149                else:
1150                    break
1151
1152            offset = current - self._current if current > self._current else 1
1153
1154            self._start = current
1155            self._advance(offset)
1156
1157            if not self._char.isspace():
1158                if self._char.isdigit():
1159                    self._scan_number()
1160                elif self._char in self._IDENTIFIERS:
1161                    self._scan_identifier(self._IDENTIFIERS[self._char])
1162                else:
1163                    self._scan_keywords()
1164
1165            if until and until():
1166                break
1167
1168        if self.tokens and self._comments:
1169            self.tokens[-1].comments.extend(self._comments)
1170
1171    def _chars(self, size: int) -> str:
1172        if size == 1:
1173            return self._char
1174
1175        start = self._current - 1
1176        end = start + size
1177
1178        return self.sql[start:end] if end <= self.size else ""
1179
1180    def _advance(self, i: int = 1, alnum: bool = False) -> None:
1181        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
1182            # Ensures we don't count an extra line if we get a \r\n line break sequence
1183            if not (self._char == "\r" and self._peek == "\n"):
1184                self._col = i
1185                self._line += 1
1186        else:
1187            self._col += i
1188
1189        self._current += i
1190        self._end = self._current >= self.size
1191        self._char = self.sql[self._current - 1]
1192        self._peek = "" if self._end else self.sql[self._current]
1193
1194        if alnum and self._char.isalnum():
1195            # Here we use local variables instead of attributes for better performance
1196            _col = self._col
1197            _current = self._current
1198            _end = self._end
1199            _peek = self._peek
1200
1201            while _peek.isalnum():
1202                _col += 1
1203                _current += 1
1204                _end = _current >= self.size
1205                _peek = "" if _end else self.sql[_current]
1206
1207            self._col = _col
1208            self._current = _current
1209            self._end = _end
1210            self._peek = _peek
1211            self._char = self.sql[_current - 1]
1212
1213    @property
1214    def _text(self) -> str:
1215        return self.sql[self._start : self._current]
1216
1217    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
1218        self._prev_token_line = self._line
1219
1220        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
1221            self.tokens[-1].comments.extend(self._comments)
1222            self._comments = []
1223
1224        self.tokens.append(
1225            Token(
1226                token_type,
1227                text=self._text if text is None else text,
1228                line=self._line,
1229                col=self._col,
1230                start=self._start,
1231                end=self._current - 1,
1232                comments=self._comments,
1233            )
1234        )
1235        self._comments = []
1236
1237        # If the command token starts the statement (i.e. it's the first token or it
1238        # follows a semicolon/BEGIN token), parse whatever comes after it as a string
1239        if (
1240            token_type in self.COMMANDS
1241            and self._peek != ";"
1242            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
1243        ):
1244            start = self._current
1245            tokens = len(self.tokens)
1246            self._scan(lambda: self._peek == ";")
1247            self.tokens = self.tokens[:tokens]
1248            text = self.sql[start : self._current].strip()
1249            if text:
1250                self._add(TokenType.STRING, text)
1251
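In effect, for command tokens such as SHOW or EXECUTE that start a statement, everything up to the next semicolon is collapsed into one opaque STRING token; a sketch:

    tokens = Tokenizer().tokenize("SHOW TABLES LIKE '%foo%'")

    assert tokens[0].token_type == TokenType.SHOW
    assert tokens[1].token_type == TokenType.STRING
    assert tokens[1].text == "TABLES LIKE '%foo%'"
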
1252    def _scan_keywords(self) -> None:
1253        size = 0
1254        word = None
1255        chars = self._text
1256        char = chars
1257        prev_space = False
1258        skip = False
1259        trie = self._KEYWORD_TRIE
1260        single_token = char in self.SINGLE_TOKENS
1261
1262        while chars:
1263            if skip:
1264                result = TrieResult.PREFIX
1265            else:
1266                result, trie = in_trie(trie, char.upper())
1267
1268            if result == TrieResult.FAILED:
1269                break
1270            if result == TrieResult.EXISTS:
1271                word = chars
1272
1273            end = self._current + size
1274            size += 1
1275
1276            if end < self.size:
1277                char = self.sql[end]
1278                single_token = single_token or char in self.SINGLE_TOKENS
1279                is_space = char.isspace()
1280
1281                if not is_space or not prev_space:
1282                    if is_space:
1283                        char = " "
1284                    chars += char
1285                    prev_space = is_space
1286                    skip = False
1287                else:
1288                    skip = True
1289            else:
1290                char = ""
1291                break
1292
1293        if word:
1294            if self._scan_string(word):
1295                return
1296            if self._scan_comment(word):
1297                return
1298            if prev_space or single_token or not char:
1299                self._advance(size - 1)
1300                word = word.upper()
1301                self._add(self.KEYWORDS[word], text=word)
1302                return
1303
1304        if self._char in self.SINGLE_TOKENS:
1305            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
1306            return
1307
1308        self._scan_var()
1309
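The scan is driven by a trie over all (possibly multi-word) keywords, built with new_trie/in_trie from sqlglot.trie, and runs of internal whitespace are collapsed to a single space so multi-word keywords still match. A sketch of both behaviors:

    from sqlglot.trie import TrieResult, in_trie, new_trie

    trie = new_trie(["ORDER", "ORDER BY"])
    assert in_trie(trie, "ORD")[0] == TrieResult.PREFIX    # keep consuming characters
    assert in_trie(trie, "ORDER")[0] == TrieResult.EXISTS  # complete keyword; a longer one may follow

    # Extra whitespace inside a multi-word keyword is collapsed before matching
    tokens = Tokenizer().tokenize("ORDER  BY x")
    assert tokens[0].token_type == TokenType.ORDER_BY
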
1310    def _scan_comment(self, comment_start: str) -> bool:
1311        if comment_start not in self._COMMENTS:
1312            return False
1313
1314        comment_start_line = self._line
1315        comment_start_size = len(comment_start)
1316        comment_end = self._COMMENTS[comment_start]
1317
1318        if comment_end:
1319            # Skip the comment's start delimiter
1320            self._advance(comment_start_size)
1321
1322            comment_count = 1
1323            comment_end_size = len(comment_end)
1324
1325            while not self._end:
1326                if self._chars(comment_end_size) == comment_end:
1327                    comment_count -= 1
1328                    if not comment_count:
1329                        break
1330
1331                self._advance(alnum=True)
1332
1333                # Nested comments are allowed by some dialects, e.g. databricks, duckdb, postgres
1334                if (
1335                    self.NESTED_COMMENTS
1336                    and not self._end
1337                    and self._chars(comment_end_size) == comment_start
1338                ):
1339                    self._advance(comment_start_size)
1340                    comment_count += 1
1341
1342            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
1343            self._advance(comment_end_size - 1)
1344        else:
1345            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
1346                self._advance(alnum=True)
1347            self._comments.append(self._text[comment_start_size:])
1348
1349        if (
1350            comment_start == self.HINT_START
1351            and self.tokens
1352            and self.tokens[-1].token_type in self.TOKENS_PRECEDING_HINT
1353        ):
1354            self._add(TokenType.HINT)
1355
1356        # A leading comment is attached to the succeeding token, while a trailing comment is attached to the preceding one.
1357        # Multiple consecutive comments are preserved by appending them to the current comments list.
1358        if comment_start_line == self._prev_token_line:
1359            self.tokens[-1].comments.extend(self._comments)
1360            self._comments = []
1361            self._prev_token_line = self._line
1362
1363        return True
1364
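The attachment rules in the final branch are easiest to see by example (the stored comment text keeps its padding, minus the delimiters):

    # A trailing comment on the same line attaches to the preceding token
    tok = Tokenizer().tokenize("SELECT 1 /* same line */")[-1]
    assert tok.comments == [" same line "]

    # A leading comment attaches to the succeeding token
    tok = Tokenizer().tokenize("-- header\nSELECT 1")[0]
    assert tok.comments == [" header"]
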
1365    def _scan_number(self) -> None:
1366        if self._char == "0":
1367            peek = self._peek.upper()
1368            if peek == "B":
1369                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
1370            elif peek == "X":
1371                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
1372
1373        decimal = False
1374        scientific = 0
1375
1376        while True:
1377            if self._peek.isdigit():
1378                self._advance()
1379            elif self._peek == "." and not decimal:
1380                if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER:
1381                    return self._add(TokenType.NUMBER)
1382                decimal = True
1383                self._advance()
1384            elif self._peek in ("-", "+") and scientific == 1:
1385                # Only consume +/- if followed by a digit
1386                if self._current + 1 < self.size and self.sql[self._current + 1].isdigit():
1387                    scientific += 1
1388                    self._advance()
1389                else:
1390                    return self._add(TokenType.NUMBER)
1391            elif self._peek.upper() == "E" and not scientific:
1392                scientific += 1
1393                self._advance()
1394            elif self._peek == "_" and self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED:
1395                self._advance()
1396            elif self._peek.isidentifier():
1397                number_text = self._text
1398                literal = ""
1399
1400                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1401                    literal += self._peek
1402                    self._advance()
1403
1404                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))
1405
1406                if token_type:
1407                    self._add(TokenType.NUMBER, number_text)
1408                    self._add(TokenType.DCOLON, "::")
1409                    return self._add(token_type, literal)
1410                elif self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
1411                    return self._add(TokenType.VAR)
1412
1413                self._advance(-len(literal))
1414                return self._add(TokenType.NUMBER, number_text)
1415            else:
1416                return self._add(TokenType.NUMBER)
1417
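The scientific-notation branch only consumes a sign when a digit follows, so a complete float literal stays a single token; a sketch:

    tokens = Tokenizer().tokenize("1.5e-3")

    # Decimal point, exponent marker and signed exponent all belong to one NUMBER
    assert len(tokens) == 1
    assert tokens[0].token_type == TokenType.NUMBER
    assert tokens[0].text == "1.5e-3"
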
1418    def _scan_bits(self) -> None:
1419        self._advance()
1420        value = self._extract_value()
1421        try:
1422            # If `value` can't be parsed as a binary number, fall back to tokenizing it as an identifier
1423            int(value, 2)
1424            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1425        except ValueError:
1426            self._add(TokenType.IDENTIFIER)
1427
1428    def _scan_hex(self) -> None:
1429        self._advance()
1430        value = self._extract_value()
1431        try:
1432            # If `value` can't be parsed as a hex number, fall back to tokenizing it as an identifier
1433            int(value, 16)
1434            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1435        except ValueError:
1436            self._add(TokenType.IDENTIFIER)
1437
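The base Tokenizer defines no BIT_STRINGS or HEX_STRINGS (see the class attributes below), so 0b/0x prefixes only become BIT_STRING/HEX_STRING tokens in dialects that opt in. A sketch assuming MySQL's 0x hex convention:

    from sqlglot.dialects.mysql import MySQL

    tokens = MySQL.Tokenizer(dialect="mysql").tokenize("SELECT 0xCC")
    assert tokens[-1].token_type == TokenType.HEX_STRING
    assert tokens[-1].text == "CC"  # the 0x prefix is dropped
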
1438    def _extract_value(self) -> str:
1439        while True:
1440            char = self._peek.strip()
1441            if char and char not in self.SINGLE_TOKENS:
1442                self._advance(alnum=True)
1443            else:
1444                break
1445
1446        return self._text
1447
1448    def _scan_string(self, start: str) -> bool:
1449        base = None
1450        token_type = TokenType.STRING
1451
1452        if start in self._QUOTES:
1453            end = self._QUOTES[start]
1454        elif start in self._FORMAT_STRINGS:
1455            end, token_type = self._FORMAT_STRINGS[start]
1456
1457            if token_type == TokenType.HEX_STRING:
1458                base = 16
1459            elif token_type == TokenType.BIT_STRING:
1460                base = 2
1461            elif token_type == TokenType.HEREDOC_STRING:
1462                self._advance()
1463
1464                if self._char == end:
1465                    tag = ""
1466                else:
1467                    tag = self._extract_string(
1468                        end,
1469                        raw_string=True,
1470                        raise_unmatched=not self.HEREDOC_TAG_IS_IDENTIFIER,
1471                    )
1472
1473                if (
1474                    tag
1475                    and self.HEREDOC_TAG_IS_IDENTIFIER
1476                    and (self._end or tag.isdigit() or any(c.isspace() for c in tag))
1477                ):
1478                    if not self._end:
1479                        self._advance(-1)
1480
1481                    self._advance(-len(tag))
1482                    self._add(self.HEREDOC_STRING_ALTERNATIVE)
1483                    return True
1484
1485                end = f"{start}{tag}{end}"
1486        else:
1487            return False
1488
1489        self._advance(len(start))
1490        text = self._extract_string(
1491            end,
1492            escapes=(
1493                self._BYTE_STRING_ESCAPES
1494                if token_type == TokenType.BYTE_STRING
1495                else self._STRING_ESCAPES
1496            ),
1497            raw_string=token_type == TokenType.RAW_STRING,
1498        )
1499
1500        if base and text:
1501            try:
1502                int(text, base)
1503            except Exception:
1504                raise TokenError(
1505                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1506                )
1507
1508        self._add(token_type, text)
1509        return True
1510
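The heredoc branch is what powers, for example, Postgres-style dollar quoting, where the tag between the opening delimiters becomes part of the closing marker. A sketch assuming the Postgres dialect:

    from sqlglot.dialects.postgres import Postgres

    tokens = Postgres.Tokenizer(dialect="postgres").tokenize("SELECT $tag$hi$tag$")
    assert tokens[-1].token_type == TokenType.HEREDOC_STRING
    assert tokens[-1].text == "hi"
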
1511    def _scan_identifier(self, identifier_end: str) -> None:
1512        self._advance()
1513        text = self._extract_string(
1514            identifier_end, escapes=self._IDENTIFIER_ESCAPES | {identifier_end}
1515        )
1516        self._add(TokenType.IDENTIFIER, text)
1517
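Since the closing quote is added to the escape set, a doubled delimiter inside a quoted identifier collapses to a literal quote; a sketch with the default double-quoted identifiers:

    tokens = Tokenizer().tokenize('SELECT "a""b"')
    assert tokens[-1].token_type == TokenType.IDENTIFIER
    assert tokens[-1].text == 'a"b'
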
1518    def _scan_var(self) -> None:
1519        while True:
1520            char = self._peek.strip()
1521            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1522                self._advance(alnum=True)
1523            else:
1524                break
1525
1526        self._add(
1527            TokenType.VAR
1528            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1529            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1530        )
1531
1532    def _extract_string(
1533        self,
1534        delimiter: str,
1535        escapes: t.Optional[t.Set[str]] = None,
1536        raw_string: bool = False,
1537        raise_unmatched: bool = True,
1538    ) -> str:
1539        text = ""
1540        delim_size = len(delimiter)
1541        escapes = self._STRING_ESCAPES if escapes is None else escapes
1542
1543        while True:
1544            if (
1545                not raw_string
1546                and self.dialect.UNESCAPED_SEQUENCES
1547                and self._peek
1548                and self._char in escapes
1549            ):
1550                unescaped_sequence = self.dialect.UNESCAPED_SEQUENCES.get(self._char + self._peek)
1551                if unescaped_sequence:
1552                    self._advance(2)
1553                    text += unescaped_sequence
1554                    continue
1555
1556            is_valid_custom_escape = (
1557                self.ESCAPE_FOLLOW_CHARS
1558                and self._char == "\\"
1559                and self._peek not in self.ESCAPE_FOLLOW_CHARS
1560            )
1561
1562            if (
1563                (self.STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS or not raw_string)
1564                and self._char in escapes
1565                and (self._peek == delimiter or self._peek in escapes or is_valid_custom_escape)
1566                and (self._char not in self._QUOTES or self._char == self._peek)
1567            ):
1568                if self._peek == delimiter:
1569                    text += self._peek
1570                elif is_valid_custom_escape and self._char != self._peek:
1571                    text += self._peek
1572                else:
1573                    text += self._char + self._peek
1574
1575                if self._current + 1 < self.size:
1576                    self._advance(2)
1577                else:
1578                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
1579            else:
1580                if self._chars(delim_size) == delimiter:
1581                    if delim_size > 1:
1582                        self._advance(delim_size - 1)
1583                    break
1584
1585                if self._end:
1586                    if not raise_unmatched:
1587                        return text + self._char
1588
1589                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")
1590
1591                current = self._current - 1
1592                self._advance(alnum=True)
1593                text += self.sql[current : self._current - 1]
1594
1595        return text
1596
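The escape branch above also covers the classic doubled-quote escape inside ordinary strings (the default STRING_ESCAPES is ["'"]); a sketch:

    tokens = Tokenizer().tokenize("SELECT 'it''s'")
    assert tokens[-1].token_type == TokenType.STRING
    assert tokens[-1].text == "it's"
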
1597    def tokenize_rs(self, sql: str) -> t.List[Token]:
1598        if not self._RS_TOKENIZER:
1599            raise SqlglotError("Rust tokenizer is not available")
1600
1601        tokens, error_msg = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
1602        for token in tokens:
1603            token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
1604
1605        # Setting this here so partial token lists can be inspected even if there is a failure
1606        self.tokens = tokens
1607
1608        if error_msg is not None:
1609            raise TokenError(error_msg)
1610
1611        return tokens
Tokenizer(dialect: Union[str, sqlglot.dialects.Dialect, Type[sqlglot.dialects.Dialect], NoneType] = None, use_rs_tokenizer: Optional[bool] = None, **opts: Any)
1081    def __init__(
1082        self,
1083        dialect: DialectType = None,
1084        use_rs_tokenizer: t.Optional[bool] = None,
1085        **opts: t.Any,
1086    ) -> None:
1087        from sqlglot.dialects import Dialect
1088
1089        self.dialect = Dialect.get_or_raise(dialect)
1090
1091        # Initialize `use_rs_tokenizer`, allowing it to be overridden per Tokenizer instance
1092        self.use_rs_tokenizer = (
1093            use_rs_tokenizer if use_rs_tokenizer is not None else USE_RS_TOKENIZER
1094        )
1095
1096        if self.use_rs_tokenizer:
1097            self._rs_dialect_settings = RsTokenizerDialectSettings(
1098                unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES,
1099                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
1100                numbers_can_be_underscore_separated=self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED,
1101            )
1102
1103        self.reset()
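Note that the dialect argument only feeds dialect-level flags (UNESCAPED_SEQUENCES, IDENTIFIERS_CAN_START_WITH_DIGIT, and so on); the quoting and keyword tables come from the Tokenizer subclass itself. To tokenize with a dialect's full rules, instantiate that dialect's nested Tokenizer, as in this sketch:

    from sqlglot.tokens import Tokenizer
    from sqlglot.dialects.duckdb import DuckDB

    generic = Tokenizer(dialect="duckdb")        # base rules plus duckdb dialect flags
    duckdb = DuckDB.Tokenizer(dialect="duckdb")  # the full duckdb tokenizer rules
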
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDE: 'TILDE'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>}
BIT_STRINGS: List[Union[str, Tuple[str, str]]] = []
BYTE_STRINGS: List[Union[str, Tuple[str, str]]] = []
HEX_STRINGS: List[Union[str, Tuple[str, str]]] = []
RAW_STRINGS: List[Union[str, Tuple[str, str]]] = []
HEREDOC_STRINGS: List[Union[str, Tuple[str, str]]] = []
UNICODE_STRINGS: List[Union[str, Tuple[str, str]]] = []
IDENTIFIERS: List[Union[str, Tuple[str, str]]] = ['"']
QUOTES: List[Union[str, Tuple[str, str]]] = ["'"]
STRING_ESCAPES = ["'"]
BYTE_STRING_ESCAPES: List[str] = []
VAR_SINGLE_TOKENS: Set[str] = set()
ESCAPE_FOLLOW_CHARS: List[str] = []
IDENTIFIER_ESCAPES: List[str] = []
HEREDOC_TAG_IS_IDENTIFIER = False
HEREDOC_STRING_ALTERNATIVE = <TokenType.VAR: 'VAR'>
STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS = True
NESTED_COMMENTS = True
HINT_START = '/*+'
TOKENS_PRECEDING_HINT = {<TokenType.INSERT: 'INSERT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.SELECT: 'SELECT'>, <TokenType.UPDATE: 'UPDATE'>}
KEYWORDS: Dict[str, TokenType] = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '&<': <TokenType.AMP_LT: 'AMP_LT'>, '&>': <TokenType.AMP_GT: 'AMP_GT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '?::': <TokenType.QDCOLON: 'QDCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '|>': <TokenType.PIPE_GT: 'PIPE_GT'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, '-|-': <TokenType.ADJACENT: 'ADJACENT'>, 'ALL': <TokenType.ALL: 'ALL'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'CURRENT_CATALOG': <TokenType.CURRENT_CATALOG: 'CURRENT_CATALOG'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': 
<TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FILE': <TokenType.FILE: 'FILE'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCALTIME': <TokenType.LOCALTIME: 'LOCALTIME'>, 'LOCALTIMESTAMP': <TokenType.LOCALTIMESTAMP: 'LOCALTIMESTAMP'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NAMESPACE': <TokenType.NAMESPACE: 'NAMESPACE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUT': <TokenType.OUT: 'OUT'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'OPERATOR': <TokenType.OPERATOR: 'OPERATOR'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SESSION': <TokenType.SESSION: 'SESSION'>, 'SESSION_USER': <TokenType.SESSION_USER: 'SESSION_USER'>, 'SET': <TokenType.SET: 'SET'>, 
'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT256': <TokenType.INT256: 'INT256'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'UINT128': <TokenType.UINT128: 'UINT128'>, 'UINT256': <TokenType.UINT256: 'UINT256'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'DECFLOAT': <TokenType.DECFLOAT: 'DECFLOAT'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUM': <TokenType.BIGNUM: 'BIGNUM'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': 
<TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIME_NS': <TokenType.TIME_NS: 'TIME_NS'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.ANALYZE: 'ANALYZE'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'REVOKE': <TokenType.REVOKE: 'REVOKE'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
WHITE_SPACE: Dict[Optional[str], TokenType] = {' ': <TokenType.SPACE: 'SPACE'>, '\t': <TokenType.SPACE: 'SPACE'>, '\n': <TokenType.BREAK: 'BREAK'>, '\r': <TokenType.BREAK: 'BREAK'>}
COMMANDS = {<TokenType.RENAME: 'RENAME'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>}
COMMAND_PREFIX_TOKENS = {<TokenType.SEMICOLON: 'SEMICOLON'>, <TokenType.BEGIN: 'BEGIN'>}
NUMERIC_LITERALS: Dict[str, str] = {}
COMMENTS = ['--', ('/*', '*/')]
dialect
use_rs_tokenizer
def reset(self) -> None:
1105    def reset(self) -> None:
1106        self.sql = ""
1107        self.size = 0
1108        self.tokens: t.List[Token] = []
1109        self._start = 0
1110        self._current = 0
1111        self._line = 1
1112        self._col = 0
1113        self._comments: t.List[str] = []
1114
1115        self._char = ""
1116        self._end = False
1117        self._peek = ""
1118        self._prev_token_line = -1
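The pure-Python tokenize path calls reset() up front, so a single instance can safely be reused across inputs:

    tokenizer = Tokenizer()
    first = tokenizer.tokenize("SELECT 1")
    second = tokenizer.tokenize("SELECT 2")  # no state leaks from the first call
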
def tokenize(self, sql: str) -> List[Token]:
1120    def tokenize(self, sql: str) -> t.List[Token]:
1121        """Returns a list of tokens corresponding to the SQL string `sql`."""
1122        if self.use_rs_tokenizer:
1123            return self.tokenize_rs(sql)
1124
1125        self.reset()
1126        self.sql = sql
1127        self.size = len(sql)
1128
1129        try:
1130            self._scan()
1131        except Exception as e:
1132            start = max(self._current - 50, 0)
1133            end = min(self._current + 50, self.size - 1)
1134            context = self.sql[start:end]
1135            raise TokenError(f"Error tokenizing '{context}'") from e
1136
1137        return self.tokens

Returns a list of tokens corresponding to the SQL string sql.

def tokenize_rs(self, sql: str) -> List[Token]:
1597    def tokenize_rs(self, sql: str) -> t.List[Token]:
1598        if not self._RS_TOKENIZER:
1599            raise SqlglotError("Rust tokenizer is not available")
1600
1601        tokens, error_msg = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
1602        for token in tokens:
1603            token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
1604
1605        # Setting this here so partial token lists can be inspected even if there is a failure
1606        self.tokens = tokens
1607
1608        if error_msg is not None:
1609            raise TokenError(error_msg)
1610
1611        return tokens
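The Rust path can be toggled per instance via the constructor, or process-wide through the SQLGLOTRS_TOKENIZER environment variable checked at import time; a sketch:

    # Per instance: force the pure-Python scanner even if sqlglotrs is installed
    tokens = Tokenizer(use_rs_tokenizer=False).tokenize("SELECT 1")

    # Process-wide: disable the Rust tokenizer before sqlglot is imported
    #   SQLGLOTRS_TOKENIZER=0 python my_script.py
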
size
sql
tokens