sqlglot.tokens
from __future__ import annotations

import os
import typing as t
from enum import auto

from sqlglot.errors import SqlglotError, TokenError
from sqlglot.helper import AutoName
from sqlglot.trie import TrieResult, in_trie, new_trie

if t.TYPE_CHECKING:
    from sqlglot.dialects.dialect import DialectType


try:
    from sqlglotrs import (  # type: ignore
        Tokenizer as RsTokenizer,
        TokenizerDialectSettings as RsTokenizerDialectSettings,
        TokenizerSettings as RsTokenizerSettings,
        TokenTypeSettings as RsTokenTypeSettings,
    )

    USE_RS_TOKENIZER = os.environ.get("SQLGLOTRS_TOKENIZER", "1") == "1"
except ImportError:
    USE_RS_TOKENIZER = False


class TokenType(AutoName):
    L_PAREN = auto()
    R_PAREN = auto()
    L_BRACKET = auto()
    R_BRACKET = auto()
    L_BRACE = auto()
    R_BRACE = auto()
    COMMA = auto()
    DOT = auto()
    DASH = auto()
    PLUS = auto()
    COLON = auto()
    DOTCOLON = auto()
    DCOLON = auto()
    DCOLONDOLLAR = auto()
    DCOLONPERCENT = auto()
    DQMARK = auto()
    SEMICOLON = auto()
    STAR = auto()
    BACKSLASH = auto()
    SLASH = auto()
    LT = auto()
    LTE = auto()
    GT = auto()
    GTE = auto()
    NOT = auto()
    EQ = auto()
    NEQ = auto()
    NULLSAFE_EQ = auto()
    COLON_EQ = auto()
    COLON_GT = auto()
    NCOLON_GT = auto()
    AND = auto()
    OR = auto()
    AMP = auto()
    DPIPE = auto()
    PIPE_GT = auto()
    PIPE = auto()
    PIPE_SLASH = auto()
    DPIPE_SLASH = auto()
    CARET = auto()
    CARET_AT = auto()
    TILDA = auto()
    ARROW = auto()
    DARROW = auto()
    FARROW = auto()
    HASH = auto()
    HASH_ARROW = auto()
    DHASH_ARROW = auto()
    LR_ARROW = auto()
    DAT = auto()
    LT_AT = auto()
    AT_GT = auto()
    DOLLAR = auto()
    PARAMETER = auto()
    SESSION = auto()
    SESSION_PARAMETER = auto()
    DAMP = auto()
    XOR = auto()
    DSTAR = auto()
    QMARK_AMP = auto()
    QMARK_PIPE = auto()
    HASH_DASH = auto()
    EXCLAMATION = auto()

    URI_START = auto()

    BLOCK_START = auto()
    BLOCK_END = auto()

    SPACE = auto()
    BREAK = auto()

    STRING = auto()
    NUMBER = auto()
    IDENTIFIER = auto()
    DATABASE = auto()
    COLUMN = auto()
    COLUMN_DEF = auto()
    SCHEMA = auto()
    TABLE = auto()
    WAREHOUSE = auto()
    STAGE = auto()
    STREAMLIT = auto()
    VAR = auto()
    BIT_STRING = auto()
    HEX_STRING = auto()
    BYTE_STRING = auto()
    NATIONAL_STRING = auto()
    RAW_STRING = auto()
    HEREDOC_STRING = auto()
    UNICODE_STRING = auto()

    # types
    BIT = auto()
    BOOLEAN = auto()
    TINYINT = auto()
    UTINYINT = auto()
    SMALLINT = auto()
    USMALLINT = auto()
    MEDIUMINT = auto()
    UMEDIUMINT = auto()
    INT = auto()
    UINT = auto()
    BIGINT = auto()
    UBIGINT = auto()
    INT128 = auto()
    UINT128 = auto()
    INT256 = auto()
    UINT256 = auto()
    FLOAT = auto()
    DOUBLE = auto()
    UDOUBLE = auto()
    DECIMAL = auto()
    DECIMAL32 = auto()
    DECIMAL64 = auto()
    DECIMAL128 = auto()
    DECIMAL256 = auto()
    UDECIMAL = auto()
    BIGDECIMAL = auto()
    CHAR = auto()
    NCHAR = auto()
    VARCHAR = auto()
    NVARCHAR = auto()
    BPCHAR = auto()
    TEXT = auto()
    MEDIUMTEXT = auto()
    LONGTEXT = auto()
    BLOB = auto()
    MEDIUMBLOB = auto()
    LONGBLOB = auto()
    TINYBLOB = auto()
    TINYTEXT = auto()
    NAME = auto()
    BINARY = auto()
    VARBINARY = auto()
    JSON = auto()
    JSONB = auto()
    TIME = auto()
    TIMETZ = auto()
    TIMESTAMP = auto()
    TIMESTAMPTZ = auto()
    TIMESTAMPLTZ = auto()
    TIMESTAMPNTZ = auto()
    TIMESTAMP_S = auto()
    TIMESTAMP_MS = auto()
    TIMESTAMP_NS = auto()
    DATETIME = auto()
    DATETIME2 = auto()
    DATETIME64 = auto()
    SMALLDATETIME = auto()
    DATE = auto()
    DATE32 = auto()
    INT4RANGE = auto()
    INT4MULTIRANGE = auto()
    INT8RANGE = auto()
    INT8MULTIRANGE = auto()
    NUMRANGE = auto()
    NUMMULTIRANGE = auto()
    TSRANGE = auto()
    TSMULTIRANGE = auto()
    TSTZRANGE = auto()
    TSTZMULTIRANGE = auto()
    DATERANGE = auto()
    DATEMULTIRANGE = auto()
    UUID = auto()
    GEOGRAPHY = auto()
    GEOGRAPHYPOINT = auto()
    NULLABLE = auto()
    GEOMETRY = auto()
    POINT = auto()
    RING = auto()
    LINESTRING = auto()
    MULTILINESTRING = auto()
    POLYGON = auto()
    MULTIPOLYGON = auto()
    HLLSKETCH = auto()
    HSTORE = auto()
    SUPER = auto()
    SERIAL = auto()
    SMALLSERIAL = auto()
    BIGSERIAL = auto()
    XML = auto()
    YEAR = auto()
    USERDEFINED = auto()
    MONEY = auto()
    SMALLMONEY = auto()
    ROWVERSION = auto()
    IMAGE = auto()
    VARIANT = auto()
    OBJECT = auto()
    INET = auto()
    IPADDRESS = auto()
    IPPREFIX = auto()
    IPV4 = auto()
    IPV6 = auto()
    ENUM = auto()
    ENUM8 = auto()
    ENUM16 = auto()
    FIXEDSTRING = auto()
    LOWCARDINALITY = auto()
    NESTED = auto()
    AGGREGATEFUNCTION = auto()
    SIMPLEAGGREGATEFUNCTION = auto()
    TDIGEST = auto()
    UNKNOWN = auto()
    VECTOR = auto()
    DYNAMIC = auto()
    VOID = auto()

    # keywords
    ALIAS = auto()
    ALTER = auto()
    ALL = auto()
    ANTI = auto()
    ANY = auto()
    APPLY = auto()
    ARRAY = auto()
    ASC = auto()
    ASOF = auto()
    ATTACH = auto()
    AUTO_INCREMENT = auto()
    BEGIN = auto()
    BETWEEN = auto()
    BULK_COLLECT_INTO = auto()
    CACHE = auto()
    CASE = auto()
    CHARACTER_SET = auto()
    CLUSTER_BY = auto()
    COLLATE = auto()
    COMMAND = auto()
    COMMENT = auto()
    COMMIT = auto()
    CONNECT_BY = auto()
    CONSTRAINT = auto()
    COPY = auto()
    CREATE = auto()
    CROSS = auto()
    CUBE = auto()
    CURRENT_DATE = auto()
    CURRENT_DATETIME = auto()
    CURRENT_SCHEMA = auto()
    CURRENT_TIME = auto()
    CURRENT_TIMESTAMP = auto()
    CURRENT_USER = auto()
    DECLARE = auto()
    DEFAULT = auto()
    DELETE = auto()
    DESC = auto()
    DESCRIBE = auto()
    DETACH = auto()
    DICTIONARY = auto()
    DISTINCT = auto()
    DISTRIBUTE_BY = auto()
    DIV = auto()
    DROP = auto()
    ELSE = auto()
    END = auto()
    ESCAPE = auto()
    EXCEPT = auto()
    EXECUTE = auto()
    EXISTS = auto()
    FALSE = auto()
    FETCH = auto()
    FILE_FORMAT = auto()
    FILTER = auto()
    FINAL = auto()
    FIRST = auto()
    FOR = auto()
    FORCE = auto()
    FOREIGN_KEY = auto()
    FORMAT = auto()
    FROM = auto()
    FULL = auto()
    FUNCTION = auto()
    GET = auto()
    GLOB = auto()
    GLOBAL = auto()
    GRANT = auto()
    GROUP_BY = auto()
    GROUPING_SETS = auto()
    HAVING = auto()
    HINT = auto()
    IGNORE = auto()
    ILIKE = auto()
    IN = auto()
    INDEX = auto()
    INNER = auto()
    INSERT = auto()
    INSTALL = auto()
    INTERSECT = auto()
    INTERVAL = auto()
    INTO = auto()
    INTRODUCER = auto()
    IRLIKE = auto()
    IS = auto()
    ISNULL = auto()
    JOIN = auto()
    JOIN_MARKER = auto()
    KEEP = auto()
    KEY = auto()
    KILL = auto()
    LANGUAGE = auto()
    LATERAL = auto()
    LEFT = auto()
    LIKE = auto()
    LIMIT = auto()
    LIST = auto()
    LOAD = auto()
    LOCK = auto()
    MAP = auto()
    MATCH_CONDITION = auto()
    MATCH_RECOGNIZE = auto()
    MEMBER_OF = auto()
    MERGE = auto()
    MOD = auto()
    MODEL = auto()
    NATURAL = auto()
    NEXT = auto()
    NOTHING = auto()
    NOTNULL = auto()
    NULL = auto()
    OBJECT_IDENTIFIER = auto()
    OFFSET = auto()
    ON = auto()
    ONLY = auto()
    OPERATOR = auto()
    ORDER_BY = auto()
    ORDER_SIBLINGS_BY = auto()
    ORDERED = auto()
    ORDINALITY = auto()
    OUTER = auto()
    OVER = auto()
    OVERLAPS = auto()
    OVERWRITE = auto()
    PARTITION = auto()
    PARTITION_BY = auto()
    PERCENT = auto()
    PIVOT = auto()
    PLACEHOLDER = auto()
    POSITIONAL = auto()
    PRAGMA = auto()
    PREWHERE = auto()
    PRIMARY_KEY = auto()
    PROCEDURE = auto()
    PROPERTIES = auto()
    PSEUDO_TYPE = auto()
    PUT = auto()
    QUALIFY = auto()
    QUOTE = auto()
    RANGE = auto()
    RECURSIVE = auto()
    REFRESH = auto()
    RENAME = auto()
    REPLACE = auto()
    RETURNING = auto()
    REVOKE = auto()
    REFERENCES = auto()
    RIGHT = auto()
    RLIKE = auto()
    ROLLBACK = auto()
    ROLLUP = auto()
    ROW = auto()
    ROWS = auto()
    SELECT = auto()
    SEMI = auto()
    SEPARATOR = auto()
    SEQUENCE = auto()
    SERDE_PROPERTIES = auto()
    SET = auto()
    SETTINGS = auto()
    SHOW = auto()
    SIMILAR_TO = auto()
    SOME = auto()
    SORT_BY = auto()
    SOUNDS_LIKE = auto()
    START_WITH = auto()
    STORAGE_INTEGRATION = auto()
    STRAIGHT_JOIN = auto()
    STRUCT = auto()
    SUMMARIZE = auto()
    TABLE_SAMPLE = auto()
    TAG = auto()
    TEMPORARY = auto()
    TOP = auto()
    THEN = auto()
    TRUE = auto()
    TRUNCATE = auto()
    UNCACHE = auto()
    UNION = auto()
    UNNEST = auto()
    UNPIVOT = auto()
    UPDATE = auto()
    USE = auto()
    USING = auto()
    VALUES = auto()
    VIEW = auto()
    SEMANTIC_VIEW = auto()
    VOLATILE = auto()
    WHEN = auto()
    WHERE = auto()
    WINDOW = auto()
    WITH = auto()
    UNIQUE = auto()
    UTC_DATE = auto()
    UTC_TIME = auto()
    UTC_TIMESTAMP = auto()
    VERSION_SNAPSHOT = auto()
    TIMESTAMP_SNAPSHOT = auto()
    OPTION = auto()
    SINK = auto()
    SOURCE = auto()
    ANALYZE = auto()
    NAMESPACE = auto()
    EXPORT = auto()

    # sentinel
    HIVE_TOKEN_STREAM = auto()
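
# Example (illustrative, not part of the module): TokenType is an AutoName enum,
# so each member's value is simply its own name (see sqlglot.helper.AutoName).
#
#   >>> TokenType.SELECT.value
#   'SELECT'
#   >>> TokenType.L_PAREN in TokenType
#   True
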
_ALL_TOKEN_TYPES = list(TokenType)
_TOKEN_TYPE_TO_INDEX = {token_type: i for i, token_type in enumerate(_ALL_TOKEN_TYPES)}


class Token:
    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        start: int = 0,
        end: int = 0,
        comments: t.Optional[t.List[str]] = None,
    ) -> None:
        """Token initializer.

        Args:
            token_type: The TokenType Enum.
            text: The text of the token.
            line: The line that the token ends on.
            col: The column that the token ends on.
            start: The start index of the token.
            end: The ending index of the token.
            comments: The comments to attach to the token.
        """
        self.token_type = token_type
        self.text = text
        self.line = line
        self.col = col
        self.start = start
        self.end = end
        self.comments = [] if comments is None else comments

    def __repr__(self) -> str:
        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
        return f"<Token {attributes}>"
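
# Example (illustrative): the convenience constructors above build tokens with
# the default position fields (line=1, col=1, start=0, end=0).
#
#   >>> tok = Token.number(42)
#   >>> tok.token_type is TokenType.NUMBER
#   True
#   >>> tok.text, tok.line, tok.col
#   ('42', 1, 1)
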
class _Tokenizer(type):
    def __new__(cls, clsname, bases, attrs):
        klass = super().__new__(cls, clsname, bases, attrs)

        def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
            return dict(
                (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr
            )

        def _quotes_to_format(
            token_type: TokenType, arr: t.List[str | t.Tuple[str, str]]
        ) -> t.Dict[str, t.Tuple[str, TokenType]]:
            return {k: (v, token_type) for k, v in _convert_quotes(arr).items()}

        klass._QUOTES = _convert_quotes(klass.QUOTES)
        klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS)

        klass._FORMAT_STRINGS = {
            **{
                p + s: (e, TokenType.NATIONAL_STRING)
                for s, e in klass._QUOTES.items()
                for p in ("n", "N")
            },
            **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS),
            **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS),
            **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
            **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
            **_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS),
            **_quotes_to_format(TokenType.UNICODE_STRING, klass.UNICODE_STRINGS),
        }

        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
        klass._COMMENTS = {
            **dict(
                (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
                for comment in klass.COMMENTS
            ),
            "{#": "#}",  # Ensure Jinja comments are tokenized correctly in all dialects
        }
        if klass.HINT_START in klass.KEYWORDS:
            klass._COMMENTS[klass.HINT_START] = "*/"

        klass._KEYWORD_TRIE = new_trie(
            key.upper()
            for key in (
                *klass.KEYWORDS,
                *klass._COMMENTS,
                *klass._QUOTES,
                *klass._FORMAT_STRINGS,
            )
            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
        )

        if USE_RS_TOKENIZER:
            settings = RsTokenizerSettings(
                white_space={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.WHITE_SPACE.items()},
                single_tokens={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.SINGLE_TOKENS.items()},
                keywords={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.KEYWORDS.items()},
                numeric_literals=klass.NUMERIC_LITERALS,
                identifiers=klass._IDENTIFIERS,
                identifier_escapes=klass._IDENTIFIER_ESCAPES,
                string_escapes=klass._STRING_ESCAPES,
                quotes=klass._QUOTES,
                format_strings={
                    k: (v1, _TOKEN_TYPE_TO_INDEX[v2])
                    for k, (v1, v2) in klass._FORMAT_STRINGS.items()
                },
                has_bit_strings=bool(klass.BIT_STRINGS),
                has_hex_strings=bool(klass.HEX_STRINGS),
                comments=klass._COMMENTS,
                var_single_tokens=klass.VAR_SINGLE_TOKENS,
                commands={_TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMANDS},
                command_prefix_tokens={
                    _TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMAND_PREFIX_TOKENS
                },
                heredoc_tag_is_identifier=klass.HEREDOC_TAG_IS_IDENTIFIER,
                string_escapes_allowed_in_raw_strings=klass.STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS,
                nested_comments=klass.NESTED_COMMENTS,
                hint_start=klass.HINT_START,
                tokens_preceding_hint={
                    _TOKEN_TYPE_TO_INDEX[v] for v in klass.TOKENS_PRECEDING_HINT
                },
            )
            token_types = RsTokenTypeSettings(
                bit_string=_TOKEN_TYPE_TO_INDEX[TokenType.BIT_STRING],
                break_=_TOKEN_TYPE_TO_INDEX[TokenType.BREAK],
                dcolon=_TOKEN_TYPE_TO_INDEX[TokenType.DCOLON],
                heredoc_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEREDOC_STRING],
                raw_string=_TOKEN_TYPE_TO_INDEX[TokenType.RAW_STRING],
                hex_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEX_STRING],
                identifier=_TOKEN_TYPE_TO_INDEX[TokenType.IDENTIFIER],
                number=_TOKEN_TYPE_TO_INDEX[TokenType.NUMBER],
                parameter=_TOKEN_TYPE_TO_INDEX[TokenType.PARAMETER],
                semicolon=_TOKEN_TYPE_TO_INDEX[TokenType.SEMICOLON],
                string=_TOKEN_TYPE_TO_INDEX[TokenType.STRING],
                var=_TOKEN_TYPE_TO_INDEX[TokenType.VAR],
                heredoc_string_alternative=_TOKEN_TYPE_TO_INDEX[klass.HEREDOC_STRING_ALTERNATIVE],
                hint=_TOKEN_TYPE_TO_INDEX[TokenType.HINT],
            )
            klass._RS_TOKENIZER = RsTokenizer(settings, token_types)
        else:
            klass._RS_TOKENIZER = None

        return klass
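
# Example (illustrative): the quote normalization performed by _convert_quotes
# above; plain strings map to themselves, (start, end) pairs map start -> end.
#
#   >>> dict((i, i) if isinstance(i, str) else (i[0], i[1]) for i in ["'", ("[", "]")])
#   {"'": "'", '[': ']'}
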
class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        "#": TokenType.HASH,
        # Used for breaking a var like x'y', but nothing else; the token type doesn't matter
        "'": TokenType.UNKNOWN,
        "`": TokenType.UNKNOWN,
        '"': TokenType.UNKNOWN,
    }

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
    STRING_ESCAPES = ["'"]
    VAR_SINGLE_TOKENS: t.Set[str] = set()

    # The strings in this list can always be used as escapes, regardless of the surrounding
    # identifier delimiters. By default, the closing delimiter is assumed to also act as an
    # identifier escape, e.g. if we use double-quotes, then they also act as escapes: "x"""
    IDENTIFIER_ESCAPES: t.List[str] = []

    # Whether the heredoc tags follow the same lexical rules as unquoted identifiers
    HEREDOC_TAG_IS_IDENTIFIER = False

    # Token that we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc
    HEREDOC_STRING_ALTERNATIVE = TokenType.VAR

    # Whether string escape characters function as such when placed within raw strings
    STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS = True

    NESTED_COMMENTS = True

    HINT_START = "/*+"

    TOKENS_PRECEDING_HINT = {TokenType.SELECT, TokenType.INSERT, TokenType.UPDATE, TokenType.DELETE}

    # Autofilled
    _COMMENTS: t.Dict[str, str] = {}
    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
    _IDENTIFIERS: t.Dict[str, str] = {}
    _IDENTIFIER_ESCAPES: t.Set[str] = set()
    _QUOTES: t.Dict[str, str] = {}
    _STRING_ESCAPES: t.Set[str] = set()
    _KEYWORD_TRIE: t.Dict = {}
    _RS_TOKENIZER: t.Optional[t.Any] = None

    KEYWORDS: t.Dict[str, TokenType] = {
        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
        HINT_START: TokenType.HINT,
        "==": TokenType.EQ,
        "::": TokenType.DCOLON,
        "||": TokenType.DPIPE,
        "|>": TokenType.PIPE_GT,
        ">=": TokenType.GTE,
        "<=": TokenType.LTE,
        "<>": TokenType.NEQ,
        "!=": TokenType.NEQ,
        ":=": TokenType.COLON_EQ,
        "<=>": TokenType.NULLSAFE_EQ,
        "->": TokenType.ARROW,
        "->>": TokenType.DARROW,
        "=>": TokenType.FARROW,
        "#>": TokenType.HASH_ARROW,
        "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW,
        "&&": TokenType.DAMP,
        "??": TokenType.DQMARK,
        "~~~": TokenType.GLOB,
        "~~": TokenType.LIKE,
        "~~*": TokenType.ILIKE,
        "~*": TokenType.IRLIKE,
        "ALL": TokenType.ALL,
        "AND": TokenType.AND,
        "ANTI": TokenType.ANTI,
        "ANY": TokenType.ANY,
        "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS,
        "ASOF": TokenType.ASOF,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN,
        "BETWEEN": TokenType.BETWEEN,
        "CACHE": TokenType.CACHE,
        "UNCACHE": TokenType.UNCACHE,
        "CASE": TokenType.CASE,
        "CHARACTER SET": TokenType.CHARACTER_SET,
        "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE,
        "COLUMN": TokenType.COLUMN,
        "COMMIT": TokenType.COMMIT,
        "CONNECT BY": TokenType.CONNECT_BY,
        "CONSTRAINT": TokenType.CONSTRAINT,
        "COPY": TokenType.COPY,
        "CREATE": TokenType.CREATE,
        "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE,
        "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT_SCHEMA": TokenType.CURRENT_SCHEMA,
        "CURRENT_TIME": TokenType.CURRENT_TIME,
        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "CURRENT_USER": TokenType.CURRENT_USER,
        "DATABASE": TokenType.DATABASE,
        "DEFAULT": TokenType.DEFAULT,
        "DELETE": TokenType.DELETE,
        "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE,
        "DISTINCT": TokenType.DISTINCT,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
        "DIV": TokenType.DIV,
        "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE,
        "END": TokenType.END,
        "ENUM": TokenType.ENUM,
        "ESCAPE": TokenType.ESCAPE,
        "EXCEPT": TokenType.EXCEPT,
        "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS,
        "FALSE": TokenType.FALSE,
        "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER,
        "FIRST": TokenType.FIRST,
        "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION,
        "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY,
        "FORMAT": TokenType.FORMAT,
        "FROM": TokenType.FROM,
        "GEOGRAPHY": TokenType.GEOGRAPHY,
        "GEOMETRY": TokenType.GEOMETRY,
        "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY,
        "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING,
        "ILIKE": TokenType.ILIKE,
        "IN": TokenType.IN,
        "INDEX": TokenType.INDEX,
        "INET": TokenType.INET,
        "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT,
        "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT,
        "INTO": TokenType.INTO,
        "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL,
        "JOIN": TokenType.JOIN,
        "KEEP": TokenType.KEEP,
        "KILL": TokenType.KILL,
        "LATERAL": TokenType.LATERAL,
        "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE,
        "LIMIT": TokenType.LIMIT,
        "LOAD": TokenType.LOAD,
        "LOCK": TokenType.LOCK,
        "MERGE": TokenType.MERGE,
        "NAMESPACE": TokenType.NAMESPACE,
        "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT,
        "NOT": TokenType.NOT,
        "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL,
        "OBJECT": TokenType.OBJECT,
        "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON,
        "OR": TokenType.OR,
        "XOR": TokenType.XOR,
        "ORDER BY": TokenType.ORDER_BY,
        "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER,
        "OVER": TokenType.OVER,
        "OVERLAPS": TokenType.OVERLAPS,
        "OVERWRITE": TokenType.OVERWRITE,
        "PARTITION": TokenType.PARTITION,
        "PARTITION BY": TokenType.PARTITION_BY,
        "PARTITIONED BY": TokenType.PARTITION_BY,
        "PARTITIONED_BY": TokenType.PARTITION_BY,
        "PERCENT": TokenType.PERCENT,
        "PIVOT": TokenType.PIVOT,
        "PRAGMA": TokenType.PRAGMA,
        "PRIMARY KEY": TokenType.PRIMARY_KEY,
        "PROCEDURE": TokenType.PROCEDURE,
        "QUALIFY": TokenType.QUALIFY,
        "RANGE": TokenType.RANGE,
        "RECURSIVE": TokenType.RECURSIVE,
        "REGEXP": TokenType.RLIKE,
        "RENAME": TokenType.RENAME,
        "REPLACE": TokenType.REPLACE,
        "RETURNING": TokenType.RETURNING,
        "REFERENCES": TokenType.REFERENCES,
        "RIGHT": TokenType.RIGHT,
        "RLIKE": TokenType.RLIKE,
        "ROLLBACK": TokenType.ROLLBACK,
        "ROLLUP": TokenType.ROLLUP,
        "ROW": TokenType.ROW,
        "ROWS": TokenType.ROWS,
        "SCHEMA": TokenType.SCHEMA,
        "SELECT": TokenType.SELECT,
        "SEMI": TokenType.SEMI,
        "SESSION": TokenType.SESSION,
        "SET": TokenType.SET,
        "SETTINGS": TokenType.SETTINGS,
        "SHOW": TokenType.SHOW,
        "SIMILAR TO": TokenType.SIMILAR_TO,
        "SOME": TokenType.SOME,
        "SORT BY": TokenType.SORT_BY,
        "START WITH": TokenType.START_WITH,
        "STRAIGHT_JOIN": TokenType.STRAIGHT_JOIN,
        "TABLE": TokenType.TABLE,
        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
        "TEMP": TokenType.TEMPORARY,
        "TEMPORARY": TokenType.TEMPORARY,
        "THEN": TokenType.THEN,
        "TRUE": TokenType.TRUE,
        "TRUNCATE": TokenType.TRUNCATE,
        "UNION": TokenType.UNION,
        "UNKNOWN": TokenType.UNKNOWN,
        "UNNEST": TokenType.UNNEST,
        "UNPIVOT": TokenType.UNPIVOT,
        "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE,
        "USING": TokenType.USING,
        "UUID": TokenType.UUID,
        "VALUES": TokenType.VALUES,
        "VIEW": TokenType.VIEW,
        "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN,
        "WHERE": TokenType.WHERE,
        "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH,
        "APPLY": TokenType.APPLY,
        "ARRAY": TokenType.ARRAY,
        "BIT": TokenType.BIT,
        "BOOL": TokenType.BOOLEAN,
        "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT,
        "MEDIUMINT": TokenType.MEDIUMINT,
        "INT1": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT,
        "INT16": TokenType.SMALLINT,
        "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT,
        "HUGEINT": TokenType.INT128,
        "UHUGEINT": TokenType.UINT128,
        "INT2": TokenType.SMALLINT,
        "INTEGER": TokenType.INT,
        "INT": TokenType.INT,
        "INT4": TokenType.INT,
        "INT32": TokenType.INT,
        "INT64": TokenType.BIGINT,
        "INT128": TokenType.INT128,
        "INT256": TokenType.INT256,
        "LONG": TokenType.BIGINT,
        "BIGINT": TokenType.BIGINT,
        "INT8": TokenType.TINYINT,
        "UINT": TokenType.UINT,
        "UINT128": TokenType.UINT128,
        "UINT256": TokenType.UINT256,
        "DEC": TokenType.DECIMAL,
        "DECIMAL": TokenType.DECIMAL,
        "DECIMAL32": TokenType.DECIMAL32,
        "DECIMAL64": TokenType.DECIMAL64,
        "DECIMAL128": TokenType.DECIMAL128,
        "DECIMAL256": TokenType.DECIMAL256,
        "BIGDECIMAL": TokenType.BIGDECIMAL,
        "BIGNUMERIC": TokenType.BIGDECIMAL,
        "LIST": TokenType.LIST,
        "MAP": TokenType.MAP,
        "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL,
        "NUMERIC": TokenType.DECIMAL,
        "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT,
        "FLOAT": TokenType.FLOAT,
        "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE,
        "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE,
        "JSON": TokenType.JSON,
        "JSONB": TokenType.JSONB,
        "CHAR": TokenType.CHAR,
        "CHARACTER": TokenType.CHAR,
        "CHAR VARYING": TokenType.VARCHAR,
        "CHARACTER VARYING": TokenType.VARCHAR,
        "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR,
        "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR,
        "NVARCHAR2": TokenType.NVARCHAR,
        "BPCHAR": TokenType.BPCHAR,
        "STR": TokenType.TEXT,
        "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT,
        "LONGTEXT": TokenType.LONGTEXT,
        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
        "TINYTEXT": TokenType.TINYTEXT,
        "CLOB": TokenType.TEXT,
        "LONGVARCHAR": TokenType.TEXT,
        "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY,
        "LONGBLOB": TokenType.LONGBLOB,
        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
        "TINYBLOB": TokenType.TINYBLOB,
        "BYTEA": TokenType.VARBINARY,
        "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME,
        "TIMETZ": TokenType.TIMETZ,
        "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
        "TIMESTAMPNTZ": TokenType.TIMESTAMPNTZ,
        "TIMESTAMP_NTZ": TokenType.TIMESTAMPNTZ,
        "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME,
        "INT4RANGE": TokenType.INT4RANGE,
        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
        "INT8RANGE": TokenType.INT8RANGE,
        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
        "NUMRANGE": TokenType.NUMRANGE,
        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
        "TSRANGE": TokenType.TSRANGE,
        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
        "TSTZRANGE": TokenType.TSTZRANGE,
        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
        "DATERANGE": TokenType.DATERANGE,
        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
        "UNIQUE": TokenType.UNIQUE,
        "VECTOR": TokenType.VECTOR,
        "STRUCT": TokenType.STRUCT,
        "SEQUENCE": TokenType.SEQUENCE,
        "VARIANT": TokenType.VARIANT,
        "ALTER": TokenType.ALTER,
        "ANALYZE": TokenType.ANALYZE,
        "CALL": TokenType.COMMAND,
        "COMMENT": TokenType.COMMENT,
        "EXPLAIN": TokenType.COMMAND,
        "GRANT": TokenType.GRANT,
        "REVOKE": TokenType.REVOKE,
        "OPTIMIZE": TokenType.COMMAND,
        "PREPARE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND,
        "USER-DEFINED": TokenType.USERDEFINED,
        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
    }

    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SHOW,
        TokenType.RENAME,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}

    # Handle numeric literals like in hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}

    COMMENTS = ["--", ("/*", "*/")]

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "dialect",
        "use_rs_tokenizer",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_rs_dialect_settings",
    )

    def __init__(
        self,
        dialect: DialectType = None,
        use_rs_tokenizer: t.Optional[bool] = None,
        **opts: t.Any,
    ) -> None:
        from sqlglot.dialects import Dialect

        self.dialect = Dialect.get_or_raise(dialect)

        # initialize `use_rs_tokenizer`, and allow it to be overwritten per Tokenizer instance
        self.use_rs_tokenizer = (
            use_rs_tokenizer if use_rs_tokenizer is not None else USE_RS_TOKENIZER
        )

        if self.use_rs_tokenizer:
            self._rs_dialect_settings = RsTokenizerDialectSettings(
                unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES,
                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
                numbers_can_be_underscore_separated=self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED,
            )

        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 0
        self._comments: t.List[str] = []

        self._char = ""
        self._end = False
        self._peek = ""
        self._prev_token_line = -1

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        if self.use_rs_tokenizer:
            return self.tokenize_rs(sql)

        self.reset()
        self.sql = sql
        self.size = len(sql)

        try:
            self._scan()
        except Exception as e:
            start = max(self._current - 50, 0)
            end = min(self._current + 50, self.size - 1)
            context = self.sql[start:end]
            raise TokenError(f"Error tokenizing '{context}'") from e

        return self.tokens
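
    # Example (illustrative, not part of the module): tokenizing with the
    # default dialect; the token types shown assume the KEYWORDS table above.
    #
    #   >>> tokens = Tokenizer().tokenize("SELECT a FROM b")
    #   >>> [(tok.token_type.name, tok.text) for tok in tokens]
    #   [('SELECT', 'SELECT'), ('VAR', 'a'), ('FROM', 'FROM'), ('VAR', 'b')]
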
    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            current = self._current

            # Skip spaces here rather than iteratively calling advance() for performance reasons
            while current < self.size:
                char = self.sql[current]

                if char.isspace() and (char == " " or char == "\t"):
                    current += 1
                else:
                    break

            offset = current - self._current if current > self._current else 1

            self._start = current
            self._advance(offset)

            if not self._char.isspace():
                if self._char.isdigit():
                    self._scan_number()
                elif self._char in self._IDENTIFIERS:
                    self._scan_identifier(self._IDENTIFIERS[self._char])
                else:
                    self._scan_keywords()

            if until and until():
                break

        if self.tokens and self._comments:
            self.tokens[-1].comments.extend(self._comments)

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char

        start = self._current - 1
        end = start + size

        return self.sql[start:end] if end <= self.size else ""

    def _advance(self, i: int = 1, alnum: bool = False) -> None:
        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
            # Ensures we don't count an extra line if we get a \r\n line break sequence
            if not (self._char == "\r" and self._peek == "\n"):
                self._col = i
                self._line += 1
        else:
            self._col += i

        self._current += i
        self._end = self._current >= self.size
        self._char = self.sql[self._current - 1]
        self._peek = "" if self._end else self.sql[self._current]

        if alnum and self._char.isalnum():
            # Here we use local variables instead of attributes for better performance
            _col = self._col
            _current = self._current
            _end = self._end
            _peek = self._peek

            while _peek.isalnum():
                _col += 1
                _current += 1
                _end = _current >= self.size
                _peek = "" if _end else self.sql[_current]

            self._col = _col
            self._current = _current
            self._end = _end
            self._peek = _peek
            self._char = self.sql[_current - 1]

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line

        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []

        self.tokens.append(
            Token(
                token_type,
                text=self._text if text is None else text,
                line=self._line,
                col=self._col,
                start=self._start,
                end=self._current - 1,
                comments=self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if (
            token_type in self.COMMANDS
            and self._peek != ";"
            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self._KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = TrieResult.PREFIX
            else:
                result, trie = in_trie(trie, char.upper())

            if result == TrieResult.FAILED:
                break
            if result == TrieResult.EXISTS:
                word = chars

            end = self._current + size
            size += 1

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char.isspace()

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                char = ""
                break

        if word:
            if self._scan_string(word):
                return
            if self._scan_comment(word):
                return
            if prev_space or single_token or not char:
                self._advance(size - 1)
                word = word.upper()
                self._add(self.KEYWORDS[word], text=word)
                return

        if self._char in self.SINGLE_TOKENS:
            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
            return

        self._scan_var()

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]

        if comment_end:
            # Skip the comment's start delimiter
            self._advance(comment_start_size)

            comment_count = 1
            comment_end_size = len(comment_end)

            while not self._end:
                if self._chars(comment_end_size) == comment_end:
                    comment_count -= 1
                    if not comment_count:
                        break

                self._advance(alnum=True)

                # Nested comments are allowed by some dialects, e.g. databricks, duckdb, postgres
                if (
                    self.NESTED_COMMENTS
                    and not self._end
                    and self._chars(comment_end_size) == comment_start
                ):
                    self._advance(comment_start_size)
                    comment_count += 1

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
            self._advance(comment_end_size - 1)
        else:
            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
                self._advance(alnum=True)
            self._comments.append(self._text[comment_start_size:])

        if (
            comment_start == self.HINT_START
            and self.tokens
            and self.tokens[-1].token_type in self.TOKENS_PRECEDING_HINT
        ):
            self._add(TokenType.HINT)

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()
            if peek == "B":
                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
            elif peek == "X":
                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():
                self._advance()
            elif self._peek == "." and not decimal:
                if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER:
                    return self._add(TokenType.NUMBER)
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():
                number_text = self._text
                literal = ""

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
                    literal += self._peek
                    self._advance()

                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)
                else:
                    replaced = literal.replace("_", "")
                    if self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED and replaced.isdigit():
                        return self._add(TokenType.NUMBER, number_text + replaced)
                    if self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
                        return self._add(TokenType.VAR)

                self._advance(-len(literal))
                return self._add(TokenType.NUMBER, number_text)
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
            int(value, 2)
            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
            int(value, 16)
            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()
            if char and char not in self.SINGLE_TOKENS:
                self._advance(alnum=True)
            else:
                break

        return self._text

    def _scan_string(self, start: str) -> bool:
        base = None
        token_type = TokenType.STRING

        if start in self._QUOTES:
            end = self._QUOTES[start]
        elif start in self._FORMAT_STRINGS:
            end, token_type = self._FORMAT_STRINGS[start]

            if token_type == TokenType.HEX_STRING:
                base = 16
            elif token_type == TokenType.BIT_STRING:
                base = 2
            elif token_type == TokenType.HEREDOC_STRING:
                self._advance()

                if self._char == end:
                    tag = ""
                else:
                    tag = self._extract_string(
                        end,
                        raw_string=True,
                        raise_unmatched=not self.HEREDOC_TAG_IS_IDENTIFIER,
                    )

                if tag and self.HEREDOC_TAG_IS_IDENTIFIER and (self._end or not tag.isidentifier()):
                    if not self._end:
                        self._advance(-1)

                    self._advance(-len(tag))
                    self._add(self.HEREDOC_STRING_ALTERNATIVE)
                    return True

                end = f"{start}{tag}{end}"
        else:
            return False

        self._advance(len(start))
        text = self._extract_string(end, raw_string=token_type == TokenType.RAW_STRING)

        if base and text:
            try:
                int(text, base)
            except Exception:
                raise TokenError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        self._add(token_type, text)
        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        self._advance()
        text = self._extract_string(
            identifier_end, escapes=self._IDENTIFIER_ESCAPES | {identifier_end}
        )
        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()
            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
                self._advance(alnum=True)
            else:
                break

        self._add(
            TokenType.VAR
            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(
        self,
        delimiter: str,
        escapes: t.Optional[t.Set[str]] = None,
        raw_string: bool = False,
        raise_unmatched: bool = True,
    ) -> str:
        text = ""
        delim_size = len(delimiter)
        escapes = self._STRING_ESCAPES if escapes is None else escapes

        while True:
            if (
                not raw_string
                and self.dialect.UNESCAPED_SEQUENCES
                and self._peek
                and self._char in self.STRING_ESCAPES
            ):
                unescaped_sequence = self.dialect.UNESCAPED_SEQUENCES.get(self._char + self._peek)
                if unescaped_sequence:
                    self._advance(2)
                    text += unescaped_sequence
                    continue
            if (
                (self.STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS or not raw_string)
                and self._char in escapes
                and (self._peek == delimiter or self._peek in escapes)
                and (self._char not in self._QUOTES or self._char == self._peek)
            ):
                if self._peek == delimiter:
                    text += self._peek
                else:
                    text += self._char + self._peek

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    if not raise_unmatched:
                        return text + self._char

                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")

                current = self._current - 1
                self._advance(alnum=True)
                text += self.sql[current : self._current - 1]

        return text

    def tokenize_rs(self, sql: str) -> t.List[Token]:
        if not self._RS_TOKENIZER:
            raise SqlglotError("Rust tokenizer is not available")

        tokens, error_msg = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
        for token in tokens:
            token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]

        # Setting this here so partial token lists can be inspected even if there is a failure
        self.tokens = tokens

        if error_msg is not None:
            raise TokenError(error_msg)

        return tokens
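
Because _Tokenizer is a metaclass, a subclass only needs to override the class-level
attributes; the keyword trie (and the Rust tokenizer settings, when sqlglotrs is
installed) are rebuilt automatically at class-creation time. A minimal, hypothetical
sketch of a dialect-style subclass follows; the MATERIALIZE mapping and the extra
"#" comment marker are invented for illustration:

from sqlglot.tokens import Tokenizer, TokenType

class MyTokenizer(Tokenizer):  # hypothetical subclass for illustration
    # The _Tokenizer metaclass recomputes _KEYWORD_TRIE, _COMMENTS, etc.
    # from these attributes when the class is created.
    KEYWORDS = {
        **Tokenizer.KEYWORDS,
        "MATERIALIZE": TokenType.COMMAND,  # hypothetical keyword mapping
    }
    COMMENTS = ["--", "#", ("/*", "*/")]  # also treat '#' as a line comment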
SMALLMONEY =
<TokenType.SMALLMONEY: 'SMALLMONEY'>
ROWVERSION =
<TokenType.ROWVERSION: 'ROWVERSION'>
IMAGE =
<TokenType.IMAGE: 'IMAGE'>
VARIANT =
<TokenType.VARIANT: 'VARIANT'>
OBJECT =
<TokenType.OBJECT: 'OBJECT'>
INET =
<TokenType.INET: 'INET'>
IPADDRESS =
<TokenType.IPADDRESS: 'IPADDRESS'>
IPPREFIX =
<TokenType.IPPREFIX: 'IPPREFIX'>
IPV4 =
<TokenType.IPV4: 'IPV4'>
IPV6 =
<TokenType.IPV6: 'IPV6'>
ENUM =
<TokenType.ENUM: 'ENUM'>
ENUM8 =
<TokenType.ENUM8: 'ENUM8'>
ENUM16 =
<TokenType.ENUM16: 'ENUM16'>
FIXEDSTRING =
<TokenType.FIXEDSTRING: 'FIXEDSTRING'>
LOWCARDINALITY =
<TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>
NESTED =
<TokenType.NESTED: 'NESTED'>
AGGREGATEFUNCTION =
<TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>
SIMPLEAGGREGATEFUNCTION =
<TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>
TDIGEST =
<TokenType.TDIGEST: 'TDIGEST'>
UNKNOWN =
<TokenType.UNKNOWN: 'UNKNOWN'>
VECTOR =
<TokenType.VECTOR: 'VECTOR'>
DYNAMIC =
<TokenType.DYNAMIC: 'DYNAMIC'>
VOID =
<TokenType.VOID: 'VOID'>
ALIAS =
<TokenType.ALIAS: 'ALIAS'>
ALTER =
<TokenType.ALTER: 'ALTER'>
ALL =
<TokenType.ALL: 'ALL'>
ANTI =
<TokenType.ANTI: 'ANTI'>
ANY =
<TokenType.ANY: 'ANY'>
APPLY =
<TokenType.APPLY: 'APPLY'>
ARRAY =
<TokenType.ARRAY: 'ARRAY'>
ASC =
<TokenType.ASC: 'ASC'>
ASOF =
<TokenType.ASOF: 'ASOF'>
ATTACH =
<TokenType.ATTACH: 'ATTACH'>
AUTO_INCREMENT =
<TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>
BEGIN =
<TokenType.BEGIN: 'BEGIN'>
BETWEEN =
<TokenType.BETWEEN: 'BETWEEN'>
BULK_COLLECT_INTO =
<TokenType.BULK_COLLECT_INTO: 'BULK_COLLECT_INTO'>
CACHE =
<TokenType.CACHE: 'CACHE'>
CASE =
<TokenType.CASE: 'CASE'>
CHARACTER_SET =
<TokenType.CHARACTER_SET: 'CHARACTER_SET'>
CLUSTER_BY =
<TokenType.CLUSTER_BY: 'CLUSTER_BY'>
COLLATE =
<TokenType.COLLATE: 'COLLATE'>
COMMAND =
<TokenType.COMMAND: 'COMMAND'>
COMMENT =
<TokenType.COMMENT: 'COMMENT'>
COMMIT =
<TokenType.COMMIT: 'COMMIT'>
CONNECT_BY =
<TokenType.CONNECT_BY: 'CONNECT_BY'>
CONSTRAINT =
<TokenType.CONSTRAINT: 'CONSTRAINT'>
COPY =
<TokenType.COPY: 'COPY'>
CREATE =
<TokenType.CREATE: 'CREATE'>
CROSS =
<TokenType.CROSS: 'CROSS'>
CUBE =
<TokenType.CUBE: 'CUBE'>
CURRENT_DATE =
<TokenType.CURRENT_DATE: 'CURRENT_DATE'>
CURRENT_DATETIME =
<TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>
CURRENT_SCHEMA =
<TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>
CURRENT_TIME =
<TokenType.CURRENT_TIME: 'CURRENT_TIME'>
CURRENT_TIMESTAMP =
<TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>
CURRENT_USER =
<TokenType.CURRENT_USER: 'CURRENT_USER'>
DECLARE =
<TokenType.DECLARE: 'DECLARE'>
DEFAULT =
<TokenType.DEFAULT: 'DEFAULT'>
DELETE =
<TokenType.DELETE: 'DELETE'>
DESC =
<TokenType.DESC: 'DESC'>
DESCRIBE =
<TokenType.DESCRIBE: 'DESCRIBE'>
DETACH =
<TokenType.DETACH: 'DETACH'>
DICTIONARY =
<TokenType.DICTIONARY: 'DICTIONARY'>
DISTINCT =
<TokenType.DISTINCT: 'DISTINCT'>
DISTRIBUTE_BY =
<TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>
DIV =
<TokenType.DIV: 'DIV'>
DROP =
<TokenType.DROP: 'DROP'>
ELSE =
<TokenType.ELSE: 'ELSE'>
END =
<TokenType.END: 'END'>
ESCAPE =
<TokenType.ESCAPE: 'ESCAPE'>
EXCEPT =
<TokenType.EXCEPT: 'EXCEPT'>
EXECUTE =
<TokenType.EXECUTE: 'EXECUTE'>
EXISTS =
<TokenType.EXISTS: 'EXISTS'>
FALSE =
<TokenType.FALSE: 'FALSE'>
FETCH =
<TokenType.FETCH: 'FETCH'>
FILE_FORMAT =
<TokenType.FILE_FORMAT: 'FILE_FORMAT'>
FILTER =
<TokenType.FILTER: 'FILTER'>
FINAL =
<TokenType.FINAL: 'FINAL'>
FIRST =
<TokenType.FIRST: 'FIRST'>
FOR =
<TokenType.FOR: 'FOR'>
FORCE =
<TokenType.FORCE: 'FORCE'>
FOREIGN_KEY =
<TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>
FORMAT =
<TokenType.FORMAT: 'FORMAT'>
FROM =
<TokenType.FROM: 'FROM'>
FULL =
<TokenType.FULL: 'FULL'>
FUNCTION =
<TokenType.FUNCTION: 'FUNCTION'>
GET =
<TokenType.GET: 'GET'>
GLOB =
<TokenType.GLOB: 'GLOB'>
GLOBAL =
<TokenType.GLOBAL: 'GLOBAL'>
GRANT =
<TokenType.GRANT: 'GRANT'>
GROUP_BY =
<TokenType.GROUP_BY: 'GROUP_BY'>
GROUPING_SETS =
<TokenType.GROUPING_SETS: 'GROUPING_SETS'>
HAVING =
<TokenType.HAVING: 'HAVING'>
HINT =
<TokenType.HINT: 'HINT'>
IGNORE =
<TokenType.IGNORE: 'IGNORE'>
ILIKE =
<TokenType.ILIKE: 'ILIKE'>
IN =
<TokenType.IN: 'IN'>
INDEX =
<TokenType.INDEX: 'INDEX'>
INNER =
<TokenType.INNER: 'INNER'>
INSERT =
<TokenType.INSERT: 'INSERT'>
INSTALL =
<TokenType.INSTALL: 'INSTALL'>
INTERSECT =
<TokenType.INTERSECT: 'INTERSECT'>
INTERVAL =
<TokenType.INTERVAL: 'INTERVAL'>
INTO =
<TokenType.INTO: 'INTO'>
INTRODUCER =
<TokenType.INTRODUCER: 'INTRODUCER'>
IRLIKE =
<TokenType.IRLIKE: 'IRLIKE'>
IS =
<TokenType.IS: 'IS'>
ISNULL =
<TokenType.ISNULL: 'ISNULL'>
JOIN =
<TokenType.JOIN: 'JOIN'>
JOIN_MARKER =
<TokenType.JOIN_MARKER: 'JOIN_MARKER'>
KEEP =
<TokenType.KEEP: 'KEEP'>
KEY =
<TokenType.KEY: 'KEY'>
KILL =
<TokenType.KILL: 'KILL'>
LANGUAGE =
<TokenType.LANGUAGE: 'LANGUAGE'>
LATERAL =
<TokenType.LATERAL: 'LATERAL'>
LEFT =
<TokenType.LEFT: 'LEFT'>
LIKE =
<TokenType.LIKE: 'LIKE'>
LIMIT =
<TokenType.LIMIT: 'LIMIT'>
LIST =
<TokenType.LIST: 'LIST'>
LOAD =
<TokenType.LOAD: 'LOAD'>
LOCK =
<TokenType.LOCK: 'LOCK'>
MAP =
<TokenType.MAP: 'MAP'>
MATCH_CONDITION =
<TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>
MATCH_RECOGNIZE =
<TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>
MEMBER_OF =
<TokenType.MEMBER_OF: 'MEMBER_OF'>
MERGE =
<TokenType.MERGE: 'MERGE'>
MOD =
<TokenType.MOD: 'MOD'>
MODEL =
<TokenType.MODEL: 'MODEL'>
NATURAL =
<TokenType.NATURAL: 'NATURAL'>
NEXT =
<TokenType.NEXT: 'NEXT'>
NOTHING =
<TokenType.NOTHING: 'NOTHING'>
NOTNULL =
<TokenType.NOTNULL: 'NOTNULL'>
NULL =
<TokenType.NULL: 'NULL'>
OBJECT_IDENTIFIER =
<TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>
OFFSET =
<TokenType.OFFSET: 'OFFSET'>
ON =
<TokenType.ON: 'ON'>
ONLY =
<TokenType.ONLY: 'ONLY'>
OPERATOR =
<TokenType.OPERATOR: 'OPERATOR'>
ORDER_BY =
<TokenType.ORDER_BY: 'ORDER_BY'>
ORDER_SIBLINGS_BY =
<TokenType.ORDER_SIBLINGS_BY: 'ORDER_SIBLINGS_BY'>
ORDERED =
<TokenType.ORDERED: 'ORDERED'>
ORDINALITY =
<TokenType.ORDINALITY: 'ORDINALITY'>
OUTER =
<TokenType.OUTER: 'OUTER'>
OVER =
<TokenType.OVER: 'OVER'>
OVERLAPS =
<TokenType.OVERLAPS: 'OVERLAPS'>
OVERWRITE =
<TokenType.OVERWRITE: 'OVERWRITE'>
PARTITION =
<TokenType.PARTITION: 'PARTITION'>
PARTITION_BY =
<TokenType.PARTITION_BY: 'PARTITION_BY'>
PERCENT =
<TokenType.PERCENT: 'PERCENT'>
PIVOT =
<TokenType.PIVOT: 'PIVOT'>
PLACEHOLDER =
<TokenType.PLACEHOLDER: 'PLACEHOLDER'>
POSITIONAL =
<TokenType.POSITIONAL: 'POSITIONAL'>
PRAGMA =
<TokenType.PRAGMA: 'PRAGMA'>
PREWHERE =
<TokenType.PREWHERE: 'PREWHERE'>
PRIMARY_KEY =
<TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>
PROCEDURE =
<TokenType.PROCEDURE: 'PROCEDURE'>
PROPERTIES =
<TokenType.PROPERTIES: 'PROPERTIES'>
PSEUDO_TYPE =
<TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>
PUT =
<TokenType.PUT: 'PUT'>
QUALIFY =
<TokenType.QUALIFY: 'QUALIFY'>
QUOTE =
<TokenType.QUOTE: 'QUOTE'>
RANGE =
<TokenType.RANGE: 'RANGE'>
RECURSIVE =
<TokenType.RECURSIVE: 'RECURSIVE'>
REFRESH =
<TokenType.REFRESH: 'REFRESH'>
RENAME =
<TokenType.RENAME: 'RENAME'>
REPLACE =
<TokenType.REPLACE: 'REPLACE'>
RETURNING =
<TokenType.RETURNING: 'RETURNING'>
REVOKE =
<TokenType.REVOKE: 'REVOKE'>
REFERENCES =
<TokenType.REFERENCES: 'REFERENCES'>
RIGHT =
<TokenType.RIGHT: 'RIGHT'>
RLIKE =
<TokenType.RLIKE: 'RLIKE'>
ROLLBACK =
<TokenType.ROLLBACK: 'ROLLBACK'>
ROLLUP =
<TokenType.ROLLUP: 'ROLLUP'>
ROW =
<TokenType.ROW: 'ROW'>
ROWS =
<TokenType.ROWS: 'ROWS'>
SELECT =
<TokenType.SELECT: 'SELECT'>
SEMI =
<TokenType.SEMI: 'SEMI'>
SEPARATOR =
<TokenType.SEPARATOR: 'SEPARATOR'>
SEQUENCE =
<TokenType.SEQUENCE: 'SEQUENCE'>
SERDE_PROPERTIES =
<TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>
SET =
<TokenType.SET: 'SET'>
SETTINGS =
<TokenType.SETTINGS: 'SETTINGS'>
SHOW =
<TokenType.SHOW: 'SHOW'>
SIMILAR_TO =
<TokenType.SIMILAR_TO: 'SIMILAR_TO'>
SOME =
<TokenType.SOME: 'SOME'>
SORT_BY =
<TokenType.SORT_BY: 'SORT_BY'>
SOUNDS_LIKE =
<TokenType.SOUNDS_LIKE: 'SOUNDS_LIKE'>
START_WITH =
<TokenType.START_WITH: 'START_WITH'>
STORAGE_INTEGRATION =
<TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>
STRAIGHT_JOIN =
<TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>
STRUCT =
<TokenType.STRUCT: 'STRUCT'>
SUMMARIZE =
<TokenType.SUMMARIZE: 'SUMMARIZE'>
TABLE_SAMPLE =
<TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>
TAG =
<TokenType.TAG: 'TAG'>
TEMPORARY =
<TokenType.TEMPORARY: 'TEMPORARY'>
TOP =
<TokenType.TOP: 'TOP'>
THEN =
<TokenType.THEN: 'THEN'>
TRUE =
<TokenType.TRUE: 'TRUE'>
TRUNCATE =
<TokenType.TRUNCATE: 'TRUNCATE'>
UNCACHE =
<TokenType.UNCACHE: 'UNCACHE'>
UNION =
<TokenType.UNION: 'UNION'>
UNNEST =
<TokenType.UNNEST: 'UNNEST'>
UNPIVOT =
<TokenType.UNPIVOT: 'UNPIVOT'>
UPDATE =
<TokenType.UPDATE: 'UPDATE'>
USE =
<TokenType.USE: 'USE'>
USING =
<TokenType.USING: 'USING'>
VALUES =
<TokenType.VALUES: 'VALUES'>
VIEW =
<TokenType.VIEW: 'VIEW'>
SEMANTIC_VIEW =
<TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>
VOLATILE =
<TokenType.VOLATILE: 'VOLATILE'>
WHEN =
<TokenType.WHEN: 'WHEN'>
WHERE =
<TokenType.WHERE: 'WHERE'>
WINDOW =
<TokenType.WINDOW: 'WINDOW'>
WITH =
<TokenType.WITH: 'WITH'>
UNIQUE =
<TokenType.UNIQUE: 'UNIQUE'>
UTC_DATE =
<TokenType.UTC_DATE: 'UTC_DATE'>
UTC_TIME =
<TokenType.UTC_TIME: 'UTC_TIME'>
UTC_TIMESTAMP =
<TokenType.UTC_TIMESTAMP: 'UTC_TIMESTAMP'>
VERSION_SNAPSHOT =
<TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>
TIMESTAMP_SNAPSHOT =
<TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>
OPTION =
<TokenType.OPTION: 'OPTION'>
SINK =
<TokenType.SINK: 'SINK'>
SOURCE =
<TokenType.SOURCE: 'SOURCE'>
ANALYZE =
<TokenType.ANALYZE: 'ANALYZE'>
NAMESPACE =
<TokenType.NAMESPACE: 'NAMESPACE'>
EXPORT =
<TokenType.EXPORT: 'EXPORT'>
HIVE_TOKEN_STREAM =
<TokenType.HIVE_TOKEN_STREAM: 'HIVE_TOKEN_STREAM'>
class Token:
class Token:
    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        start: int = 0,
        end: int = 0,
        comments: t.Optional[t.List[str]] = None,
    ) -> None:
        """Token initializer.

        Args:
            token_type: The TokenType Enum.
            text: The text of the token.
            line: The line that the token ends on.
            col: The column that the token ends on.
            start: The start index of the token.
            end: The ending index of the token.
            comments: The comments to attach to the token.
        """
        self.token_type = token_type
        self.text = text
        self.line = line
        self.col = col
        self.start = start
        self.end = end
        self.comments = [] if comments is None else comments

    def __repr__(self) -> str:
        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
        return f"<Token {attributes}>"
Token(token_type: TokenType, text: str, line: int = 1, col: int = 1, start: int = 0, end: int = 0, comments: Optional[List[str]] = None)
def __init__(
    self,
    token_type: TokenType,
    text: str,
    line: int = 1,
    col: int = 1,
    start: int = 0,
    end: int = 0,
    comments: t.Optional[t.List[str]] = None,
) -> None:
    """Token initializer.

    Args:
        token_type: The TokenType Enum.
        text: The text of the token.
        line: The line that the token ends on.
        col: The column that the token ends on.
        start: The start index of the token.
        end: The ending index of the token.
        comments: The comments to attach to the token.
    """
    self.token_type = token_type
    self.text = text
    self.line = line
    self.col = col
    self.start = start
    self.end = end
    self.comments = [] if comments is None else comments
Token initializer.
Arguments:
- token_type: The TokenType Enum.
- text: The text of the token.
- line: The line that the token ends on.
- col: The column that the token ends on.
- start: The start index of the token.
- end: The ending index of the token.
- comments: The comments to attach to the token.
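Token values are plain data, so they can also be constructed directly when building token streams by hand. A minimal sketch; the position values below are illustrative, not produced by a tokenizer run:

from sqlglot.tokens import Token, TokenType

# A SELECT keyword spanning indices 0-5 of the input, ending at line 1, column 6.
tok = Token(TokenType.SELECT, "SELECT", line=1, col=6, start=0, end=5)
print(tok)  # roughly: <Token token_type: TokenType.SELECT, text: SELECT, line: 1, col: 6, start: 0, end: 5, comments: []>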
@classmethod
def number(cls, number: int) -> Token:
    """Returns a NUMBER token with `number` as its text."""
    return cls(TokenType.NUMBER, str(number))
Returns a NUMBER token with `number` as its text.
@classmethod
def string(cls, string: str) -> Token:
    """Returns a STRING token with `string` as its text."""
    return cls(TokenType.STRING, string)
Returns a STRING token with `string` as its text.
@classmethod
def identifier(cls, identifier: str) -> Token:
    """Returns an IDENTIFIER token with `identifier` as its text."""
    return cls(TokenType.IDENTIFIER, identifier)
Returns an IDENTIFIER token with `identifier` as its text.
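Taken together, these classmethods are thin shortcuts around the initializer that fill in the token type and leave the position fields at their defaults. A quick sketch of their behavior, based on the sources shown above:

from sqlglot.tokens import Token, TokenType

assert Token.number(42).token_type is TokenType.NUMBER        # text == "42"
assert Token.string("foo").token_type is TokenType.STRING
assert Token.identifier("my_col").token_type is TokenType.IDENTIFIER
assert Token.var("x").token_type is TokenType.VAR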
class Tokenizer:
class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        "#": TokenType.HASH,
        # Used for breaking a var like x'y' but nothing else; the token type doesn't matter
        "'": TokenType.UNKNOWN,
        "`": TokenType.UNKNOWN,
        '"': TokenType.UNKNOWN,
    }

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
    STRING_ESCAPES = ["'"]
    VAR_SINGLE_TOKENS: t.Set[str] = set()

    # The strings in this list can always be used as escapes, regardless of the surrounding
    # identifier delimiters. By default, the closing delimiter is assumed to also act as an
    # identifier escape, e.g. if we use double-quotes, then they also act as escapes: "x"""
    IDENTIFIER_ESCAPES: t.List[str] = []

    # Whether the heredoc tags follow the same lexical rules as unquoted identifiers
    HEREDOC_TAG_IS_IDENTIFIER = False

    # Token that we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc
    HEREDOC_STRING_ALTERNATIVE = TokenType.VAR

    # Whether string escape characters function as such when placed within raw strings
    STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS = True

    NESTED_COMMENTS = True

    HINT_START = "/*+"

    TOKENS_PRECEDING_HINT = {TokenType.SELECT, TokenType.INSERT, TokenType.UPDATE, TokenType.DELETE}

    # Autofilled
    _COMMENTS: t.Dict[str, str] = {}
    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
    _IDENTIFIERS: t.Dict[str, str] = {}
    _IDENTIFIER_ESCAPES: t.Set[str] = set()
    _QUOTES: t.Dict[str, str] = {}
    _STRING_ESCAPES: t.Set[str] = set()
    _KEYWORD_TRIE: t.Dict = {}
    _RS_TOKENIZER: t.Optional[t.Any] = None

    KEYWORDS: t.Dict[str, TokenType] = {
        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
        HINT_START: TokenType.HINT,
        "==": TokenType.EQ,
        "::": TokenType.DCOLON,
        "||": TokenType.DPIPE,
        "|>": TokenType.PIPE_GT,
        ">=": TokenType.GTE,
        "<=": TokenType.LTE,
        "<>": TokenType.NEQ,
        "!=": TokenType.NEQ,
        ":=": TokenType.COLON_EQ,
        "<=>": TokenType.NULLSAFE_EQ,
        "->": TokenType.ARROW,
        "->>": TokenType.DARROW,
        "=>": TokenType.FARROW,
        "#>": TokenType.HASH_ARROW,
        "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW,
        "&&": TokenType.DAMP,
        "??": TokenType.DQMARK,
        "~~~": TokenType.GLOB,
        "~~": TokenType.LIKE,
        "~~*": TokenType.ILIKE,
        "~*": TokenType.IRLIKE,
        "ALL": TokenType.ALL,
        "AND": TokenType.AND,
        "ANTI": TokenType.ANTI,
        "ANY": TokenType.ANY,
        "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS,
        "ASOF": TokenType.ASOF,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN,
        "BETWEEN": TokenType.BETWEEN,
        "CACHE": TokenType.CACHE,
        "UNCACHE": TokenType.UNCACHE,
        "CASE": TokenType.CASE,
        "CHARACTER SET": TokenType.CHARACTER_SET,
        "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE,
        "COLUMN": TokenType.COLUMN,
        "COMMIT": TokenType.COMMIT,
        "CONNECT BY": TokenType.CONNECT_BY,
        "CONSTRAINT": TokenType.CONSTRAINT,
        "COPY": TokenType.COPY,
        "CREATE": TokenType.CREATE,
        "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE,
        "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT_SCHEMA": TokenType.CURRENT_SCHEMA,
        "CURRENT_TIME": TokenType.CURRENT_TIME,
        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "CURRENT_USER": TokenType.CURRENT_USER,
        "DATABASE": TokenType.DATABASE,
        "DEFAULT": TokenType.DEFAULT,
        "DELETE": TokenType.DELETE,
        "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE,
        "DISTINCT": TokenType.DISTINCT,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
        "DIV": TokenType.DIV,
        "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE,
        "END": TokenType.END,
        "ENUM": TokenType.ENUM,
        "ESCAPE": TokenType.ESCAPE,
        "EXCEPT": TokenType.EXCEPT,
        "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS,
        "FALSE": TokenType.FALSE,
        "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER,
        "FIRST": TokenType.FIRST,
        "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION,
        "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY,
        "FORMAT": TokenType.FORMAT,
        "FROM": TokenType.FROM,
        "GEOGRAPHY": TokenType.GEOGRAPHY,
        "GEOMETRY": TokenType.GEOMETRY,
        "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY,
        "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING,
        "ILIKE": TokenType.ILIKE,
        "IN": TokenType.IN,
        "INDEX": TokenType.INDEX,
        "INET": TokenType.INET,
        "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT,
        "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT,
        "INTO": TokenType.INTO,
        "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL,
        "JOIN": TokenType.JOIN,
        "KEEP": TokenType.KEEP,
        "KILL": TokenType.KILL,
        "LATERAL": TokenType.LATERAL,
        "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE,
        "LIMIT": TokenType.LIMIT,
        "LOAD": TokenType.LOAD,
        "LOCK": TokenType.LOCK,
        "MERGE": TokenType.MERGE,
        "NAMESPACE": TokenType.NAMESPACE,
        "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT,
        "NOT": TokenType.NOT,
        "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL,
        "OBJECT": TokenType.OBJECT,
        "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON,
        "OR": TokenType.OR,
        "XOR": TokenType.XOR,
        "ORDER BY": TokenType.ORDER_BY,
        "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER,
        "OVER": TokenType.OVER,
        "OVERLAPS": TokenType.OVERLAPS,
        "OVERWRITE": TokenType.OVERWRITE,
        "PARTITION": TokenType.PARTITION,
        "PARTITION BY": TokenType.PARTITION_BY,
        "PARTITIONED BY": TokenType.PARTITION_BY,
        "PARTITIONED_BY": TokenType.PARTITION_BY,
        "PERCENT": TokenType.PERCENT,
        "PIVOT": TokenType.PIVOT,
        "PRAGMA": TokenType.PRAGMA,
        "PRIMARY KEY": TokenType.PRIMARY_KEY,
        "PROCEDURE": TokenType.PROCEDURE,
        "QUALIFY": TokenType.QUALIFY,
        "RANGE": TokenType.RANGE,
        "RECURSIVE": TokenType.RECURSIVE,
        "REGEXP": TokenType.RLIKE,
        "RENAME": TokenType.RENAME,
        "REPLACE": TokenType.REPLACE,
        "RETURNING": TokenType.RETURNING,
        "REFERENCES": TokenType.REFERENCES,
        "RIGHT": TokenType.RIGHT,
        "RLIKE": TokenType.RLIKE,
        "ROLLBACK": TokenType.ROLLBACK,
        "ROLLUP": TokenType.ROLLUP,
        "ROW": TokenType.ROW,
        "ROWS": TokenType.ROWS,
        "SCHEMA": TokenType.SCHEMA,
        "SELECT": TokenType.SELECT,
        "SEMI": TokenType.SEMI,
        "SESSION": TokenType.SESSION,
        "SET": TokenType.SET,
        "SETTINGS": TokenType.SETTINGS,
        "SHOW": TokenType.SHOW,
        "SIMILAR TO": TokenType.SIMILAR_TO,
        "SOME": TokenType.SOME,
        "SORT BY": TokenType.SORT_BY,
        "START WITH": TokenType.START_WITH,
        "STRAIGHT_JOIN": TokenType.STRAIGHT_JOIN,
        "TABLE": TokenType.TABLE,
        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
        "TEMP": TokenType.TEMPORARY,
        "TEMPORARY": TokenType.TEMPORARY,
        "THEN": TokenType.THEN,
        "TRUE": TokenType.TRUE,
        "TRUNCATE": TokenType.TRUNCATE,
        "UNION": TokenType.UNION,
        "UNKNOWN": TokenType.UNKNOWN,
        "UNNEST": TokenType.UNNEST,
        "UNPIVOT": TokenType.UNPIVOT,
        "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE,
        "USING": TokenType.USING,
        "UUID": TokenType.UUID,
        "VALUES": TokenType.VALUES,
        "VIEW": TokenType.VIEW,
        "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN,
        "WHERE": TokenType.WHERE,
        "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH,
        "APPLY": TokenType.APPLY,
        "ARRAY": TokenType.ARRAY,
        "BIT": TokenType.BIT,
        "BOOL": TokenType.BOOLEAN,
        "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT,
        "MEDIUMINT": TokenType.MEDIUMINT,
        "INT1": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT,
        "INT16": TokenType.SMALLINT,
        "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT,
        "HUGEINT": TokenType.INT128,
        "UHUGEINT": TokenType.UINT128,
        "INT2": TokenType.SMALLINT,
        "INTEGER": TokenType.INT,
        "INT": TokenType.INT,
        "INT4": TokenType.INT,
        "INT32": TokenType.INT,
        "INT64": TokenType.BIGINT,
        "INT128": TokenType.INT128,
        "INT256": TokenType.INT256,
        "LONG": TokenType.BIGINT,
        "BIGINT": TokenType.BIGINT,
        "INT8": TokenType.TINYINT,
        "UINT": TokenType.UINT,
        "UINT128": TokenType.UINT128,
        "UINT256": TokenType.UINT256,
        "DEC": TokenType.DECIMAL,
        "DECIMAL": TokenType.DECIMAL,
        "DECIMAL32": TokenType.DECIMAL32,
        "DECIMAL64": TokenType.DECIMAL64,
        "DECIMAL128": TokenType.DECIMAL128,
        "DECIMAL256": TokenType.DECIMAL256,
        "BIGDECIMAL": TokenType.BIGDECIMAL,
        "BIGNUMERIC": TokenType.BIGDECIMAL,
        "LIST": TokenType.LIST,
        "MAP": TokenType.MAP,
        "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL,
        "NUMERIC": TokenType.DECIMAL,
        "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT,
        "FLOAT": TokenType.FLOAT,
        "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE,
        "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE,
        "JSON": TokenType.JSON,
        "JSONB": TokenType.JSONB,
        "CHAR": TokenType.CHAR,
        "CHARACTER": TokenType.CHAR,
        "CHAR VARYING": TokenType.VARCHAR,
        "CHARACTER VARYING": TokenType.VARCHAR,
        "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR,
        "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR,
        "NVARCHAR2": TokenType.NVARCHAR,
        "BPCHAR": TokenType.BPCHAR,
        "STR": TokenType.TEXT,
        "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT,
        "LONGTEXT": TokenType.LONGTEXT,
        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
        "TINYTEXT": TokenType.TINYTEXT,
        "CLOB": TokenType.TEXT,
        "LONGVARCHAR": TokenType.TEXT,
        "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY,
        "LONGBLOB": TokenType.LONGBLOB,
        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
        "TINYBLOB": TokenType.TINYBLOB,
        "BYTEA": TokenType.VARBINARY,
        "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME,
        "TIMETZ": TokenType.TIMETZ,
        "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
        "TIMESTAMPNTZ": TokenType.TIMESTAMPNTZ,
        "TIMESTAMP_NTZ": TokenType.TIMESTAMPNTZ,
        "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME,
        "INT4RANGE": TokenType.INT4RANGE,
        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
        "INT8RANGE": TokenType.INT8RANGE,
        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
        "NUMRANGE": TokenType.NUMRANGE,
        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
        "TSRANGE": TokenType.TSRANGE,
        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
        "TSTZRANGE": TokenType.TSTZRANGE,
        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
        "DATERANGE": TokenType.DATERANGE,
        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
        "UNIQUE": TokenType.UNIQUE,
        "VECTOR": TokenType.VECTOR,
        "STRUCT": TokenType.STRUCT,
        "SEQUENCE": TokenType.SEQUENCE,
        "VARIANT": TokenType.VARIANT,
        "ALTER": TokenType.ALTER,
        "ANALYZE": TokenType.ANALYZE,
        "CALL": TokenType.COMMAND,
        "COMMENT": TokenType.COMMENT,
        "EXPLAIN": TokenType.COMMAND,
        "GRANT": TokenType.GRANT,
        "REVOKE": TokenType.REVOKE,
        "OPTIMIZE": TokenType.COMMAND,
        "PREPARE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND,
        "USER-DEFINED": TokenType.USERDEFINED,
        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
    }

    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SHOW,
        TokenType.RENAME,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}

    # Handle numeric literals like in hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}

    COMMENTS = ["--", ("/*", "*/")]

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "dialect",
        "use_rs_tokenizer",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_rs_dialect_settings",
    )

    def __init__(
        self,
        dialect: DialectType = None,
        use_rs_tokenizer: t.Optional[bool] = None,
        **opts: t.Any,
    ) -> None:
        from sqlglot.dialects import Dialect

        self.dialect = Dialect.get_or_raise(dialect)

        # Initialize `use_rs_tokenizer`, and allow it to be overwritten per Tokenizer instance
        self.use_rs_tokenizer = (
            use_rs_tokenizer if use_rs_tokenizer is not None else USE_RS_TOKENIZER
        )

        if self.use_rs_tokenizer:
            self._rs_dialect_settings = RsTokenizerDialectSettings(
                unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES,
                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
                numbers_can_be_underscore_separated=self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED,
            )

        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 0
        self._comments: t.List[str] = []

        self._char = ""
        self._end = False
        self._peek = ""
        self._prev_token_line = -1

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        if self.use_rs_tokenizer:
            return self.tokenize_rs(sql)

        self.reset()
        self.sql = sql
        self.size = len(sql)

        try:
            self._scan()
        except Exception as e:
            start = max(self._current - 50, 0)
            end = min(self._current + 50, self.size - 1)
            context = self.sql[start:end]
            raise TokenError(f"Error tokenizing '{context}'") from e

        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            current = self._current

            # Skip spaces here rather than iteratively calling advance() for performance reasons
            while current < self.size:
                char = self.sql[current]

                if char.isspace() and (char == " " or char == "\t"):
                    current += 1
                else:
                    break

            offset = current - self._current if current > self._current else 1

            self._start = current
            self._advance(offset)

            if not self._char.isspace():
                if self._char.isdigit():
                    self._scan_number()
                elif self._char in self._IDENTIFIERS:
                    self._scan_identifier(self._IDENTIFIERS[self._char])
                else:
                    self._scan_keywords()

            if until and until():
                break

        if self.tokens and self._comments:
            self.tokens[-1].comments.extend(self._comments)

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char

        start = self._current - 1
        end = start + size

        return self.sql[start:end] if end <= self.size else ""

    def _advance(self, i: int = 1, alnum: bool = False) -> None:
        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
            # Ensures we don't count an extra line if we get a \r\n line break sequence
            if not (self._char == "\r" and self._peek == "\n"):
                self._col = i
                self._line += 1
        else:
            self._col += i

        self._current += i
        self._end = self._current >= self.size
        self._char = self.sql[self._current - 1]
        self._peek = "" if self._end else self.sql[self._current]

        if alnum and self._char.isalnum():
            # Here we use local variables instead of attributes for better performance
            _col = self._col
            _current = self._current
            _end = self._end
            _peek = self._peek

            while _peek.isalnum():
                _col += 1
                _current += 1
                _end = _current >= self.size
                _peek = "" if _end else self.sql[_current]

            self._col = _col
            self._current = _current
            self._end = _end
            self._peek = _peek
            self._char = self.sql[_current - 1]

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line

        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []

        self.tokens.append(
            Token(
                token_type,
                text=self._text if text is None else text,
                line=self._line,
                col=self._col,
                start=self._start,
                end=self._current - 1,
                comments=self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if (
            token_type in self.COMMANDS
            and self._peek != ";"
            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self._KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = TrieResult.PREFIX
            else:
                result, trie = in_trie(trie, char.upper())

            if result == TrieResult.FAILED:
                break
            if result == TrieResult.EXISTS:
                word = chars

            end = self._current + size
            size += 1

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char.isspace()

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                char = ""
                break

        if word:
            if self._scan_string(word):
                return
            if self._scan_comment(word):
                return
            if prev_space or single_token or not char:
                self._advance(size - 1)
                word = word.upper()
                self._add(self.KEYWORDS[word], text=word)
                return

        if self._char in self.SINGLE_TOKENS:
            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
            return

        self._scan_var()

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]

        if comment_end:
            # Skip the comment's start delimiter
            self._advance(comment_start_size)

            comment_count = 1
            comment_end_size = len(comment_end)

            while not self._end:
                if self._chars(comment_end_size) == comment_end:
                    comment_count -= 1
                    if not comment_count:
                        break

                self._advance(alnum=True)

                # Nested comments are allowed by some dialects, e.g. databricks, duckdb, postgres
                if (
                    self.NESTED_COMMENTS
                    and not self._end
                    and self._chars(comment_end_size) == comment_start
                ):
                    self._advance(comment_start_size)
                    comment_count += 1

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
            self._advance(comment_end_size - 1)
        else:
            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
                self._advance(alnum=True)
            self._comments.append(self._text[comment_start_size:])

        if (
            comment_start == self.HINT_START
            and self.tokens
            and self.tokens[-1].token_type in self.TOKENS_PRECEDING_HINT
        ):
            self._add(TokenType.HINT)

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()
            if peek == "B":
                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
            elif peek == "X":
                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():
                self._advance()
            elif self._peek == "." and not decimal:
                if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER:
                    return self._add(TokenType.NUMBER)
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():
                number_text = self._text
                literal = ""

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
                    literal += self._peek
                    self._advance()

                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)
                else:
                    replaced = literal.replace("_", "")
                    if self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED and replaced.isdigit():
                        return self._add(TokenType.NUMBER, number_text + replaced)
                    if self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
                        return self._add(TokenType.VAR)

                self._advance(-len(literal))
                return self._add(TokenType.NUMBER, number_text)
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
            int(value, 2)
            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
            int(value, 16)
            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()
            if char and char not in self.SINGLE_TOKENS:
                self._advance(alnum=True)
            else:
                break

        return self._text

    def _scan_string(self, start: str) -> bool:
        base = None
        token_type = TokenType.STRING

        if start in self._QUOTES:
            end = self._QUOTES[start]
        elif start in self._FORMAT_STRINGS:
            end, token_type = self._FORMAT_STRINGS[start]

            if token_type == TokenType.HEX_STRING:
                base = 16
            elif token_type == TokenType.BIT_STRING:
                base = 2
            elif token_type == TokenType.HEREDOC_STRING:
                self._advance()

                if self._char == end:
                    tag = ""
                else:
                    tag = self._extract_string(
                        end,
                        raw_string=True,
                        raise_unmatched=not self.HEREDOC_TAG_IS_IDENTIFIER,
                    )

                if tag and self.HEREDOC_TAG_IS_IDENTIFIER and (self._end or not tag.isidentifier()):
                    if not self._end:
                        self._advance(-1)

                    self._advance(-len(tag))
                    self._add(self.HEREDOC_STRING_ALTERNATIVE)
                    return True

                end = f"{start}{tag}{end}"
        else:
            return False

        self._advance(len(start))
        text = self._extract_string(end, raw_string=token_type == TokenType.RAW_STRING)

        if base and text:
            try:
                int(text, base)
            except Exception:
                raise TokenError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        self._add(token_type, text)
        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        self._advance()
        text = self._extract_string(
            identifier_end, escapes=self._IDENTIFIER_ESCAPES | {identifier_end}
        )
        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()
            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
                self._advance(alnum=True)
            else:
                break

        self._add(
            TokenType.VAR
            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(
        self,
        delimiter: str,
        escapes: t.Optional[t.Set[str]] = None,
        raw_string: bool = False,
        raise_unmatched: bool = True,
    ) -> str:
        text = ""
        delim_size = len(delimiter)
        escapes = self._STRING_ESCAPES if escapes is None else escapes

        while True:
            if (
                not raw_string
                and self.dialect.UNESCAPED_SEQUENCES
                and self._peek
                and self._char in self.STRING_ESCAPES
            ):
                unescaped_sequence = self.dialect.UNESCAPED_SEQUENCES.get(self._char + self._peek)
                if unescaped_sequence:
                    self._advance(2)
                    text += unescaped_sequence
                    continue
            if (
                (self.STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS or not raw_string)
                and self._char in escapes
                and (self._peek == delimiter or self._peek in escapes)
                and (self._char not in self._QUOTES or self._char == self._peek)
            ):
                if self._peek == delimiter:
                    text += self._peek
                else:
                    text += self._char + self._peek

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    if not raise_unmatched:
                        return text + self._char

                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")

                current = self._current - 1
                self._advance(alnum=True)
                text += self.sql[current : self._current - 1]

        return text

    def tokenize_rs(self, sql: str) -> t.List[Token]:
        if not self._RS_TOKENIZER:
            raise SqlglotError("Rust tokenizer is not available")

        tokens, error_msg = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
        for token in tokens:
            token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]

        # Setting this here so partial token lists can be inspected even if there is a failure
        self.tokens = tokens

        if error_msg is not None:
            raise TokenError(error_msg)

        return tokens
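The class-level attributes above (QUOTES, IDENTIFIERS, KEYWORDS, COMMENTS, and so on) are the intended extension points: dialect tokenizers subclass Tokenizer and override them, and the _Tokenizer metaclass autofills the derived lookup structures (_QUOTES, _IDENTIFIERS, _KEYWORD_TRIE, ...). A minimal sketch of such a subclass; MyTokenizer and MY_KEYWORD are hypothetical names, not part of sqlglot:

from sqlglot.tokens import Tokenizer, TokenType

class MyTokenizer(Tokenizer):
    # Accept backtick-quoted identifiers in addition to double quotes (MySQL-style).
    IDENTIFIERS = ["`", '"']
    # Extend the keyword table rather than replacing it.
    KEYWORDS = {
        **Tokenizer.KEYWORDS,
        "MY_KEYWORD": TokenType.VAR,
    }

tokens = MyTokenizer().tokenize("SELECT `a` FROM t")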
Tokenizer(dialect: Union[str, sqlglot.dialects.Dialect, Type[sqlglot.dialects.Dialect], NoneType] = None, use_rs_tokenizer: Optional[bool] = None, **opts: Any)
def __init__(
    self,
    dialect: DialectType = None,
    use_rs_tokenizer: t.Optional[bool] = None,
    **opts: t.Any,
) -> None:
    from sqlglot.dialects import Dialect

    self.dialect = Dialect.get_or_raise(dialect)

    # Initialize `use_rs_tokenizer`, and allow it to be overwritten per Tokenizer instance
    self.use_rs_tokenizer = (
        use_rs_tokenizer if use_rs_tokenizer is not None else USE_RS_TOKENIZER
    )

    if self.use_rs_tokenizer:
        self._rs_dialect_settings = RsTokenizerDialectSettings(
            unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES,
            identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
            numbers_can_be_underscore_separated=self.dialect.NUMBERS_CAN_BE_UNDERSCORE_SEPARATED,
        )

    self.reset()
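The constructor resolves the dialect eagerly via Dialect.get_or_raise, so a dialect name, class, or instance are all accepted. A short usage sketch:

from sqlglot.tokens import Tokenizer

tokenizer = Tokenizer(dialect="duckdb")          # resolve a dialect by name
pure_python = Tokenizer(use_rs_tokenizer=False)  # opt out of the Rust tokenizer even if sqlglotrs is installed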
SINGLE_TOKENS =
{'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>}
HEREDOC_STRING_ALTERNATIVE =
<TokenType.VAR: 'VAR'>
TOKENS_PRECEDING_HINT =
{<TokenType.DELETE: 'DELETE'>, <TokenType.INSERT: 'INSERT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.SELECT: 'SELECT'>}
KEYWORDS: Dict[str, TokenType] =
{'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '|>': <TokenType.PIPE_GT: 'PIPE_GT'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 
'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NAMESPACE': <TokenType.NAMESPACE: 'NAMESPACE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SESSION': <TokenType.SESSION: 'SESSION'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 
'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT256': <TokenType.INT256: 'INT256'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'UINT128': <TokenType.UINT128: 'UINT128'>, 'UINT256': <TokenType.UINT256: 'UINT256'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': 
<TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.ANALYZE: 'ANALYZE'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'REVOKE': <TokenType.REVOKE: 'REVOKE'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
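The KEYWORDS mapping above drives keyword recognition: during scanning, candidate words are looked up here (via a trie, so multi-word entries such as 'ORDER BY' or 'PRIMARY KEY' match as a single token). A minimal sketch against the public Tokenizer API; the exact token stream can vary by dialect:

    from sqlglot.tokens import Tokenizer, TokenType

    tokens = Tokenizer().tokenize("SELECT a FROM t ORDER BY a")

    # Keywords resolve through KEYWORDS; the two words "ORDER BY"
    # come back as one ORDER_BY token, not two separate tokens.
    assert tokens[0].token_type == TokenType.SELECT
    assert any(tok.token_type == TokenType.ORDER_BY for tok in tokens)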
WHITE_SPACE: Dict[Optional[str], TokenType] =
{' ': <TokenType.SPACE: 'SPACE'>, '\t': <TokenType.SPACE: 'SPACE'>, '\n': <TokenType.BREAK: 'BREAK'>, '\r': <TokenType.BREAK: 'BREAK'>}
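WHITE_SPACE classifies the characters the scanner skips between tokens: spaces and tabs are SPACE, while '\n' and '\r' are BREAK and advance the line counter. Whitespace is consumed rather than emitted, but it determines the line/col coordinates recorded on each token. A small sketch (assumes the default tokenizer settings):

    from sqlglot.tokens import Tokenizer

    tokens = Tokenizer().tokenize("SELECT\n  a")

    # '\n' maps to TokenType.BREAK, so the identifier on the second
    # physical line is reported at line 2 (lines are 1-based).
    assert tokens[-1].line == 2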
COMMANDS =
{<TokenType.FETCH: 'FETCH'>, <TokenType.RENAME: 'RENAME'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.SHOW: 'SHOW'>}
COMMAND_PREFIX_TOKENS =
{<TokenType.SEMICOLON: 'SEMICOLON'>, <TokenType.BEGIN: 'BEGIN'>}
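Together these two sets control command-mode scanning: when a token in COMMANDS starts a statement (it is the first token, or the previous token is in COMMAND_PREFIX_TOKENS, e.g. a SEMICOLON or BEGIN), the remainder of the statement is captured verbatim as a single STRING token instead of being tokenized further. A sketch of the observable effect with the base tokenizer:

    from sqlglot.tokens import Tokenizer, TokenType

    tokens = Tokenizer().tokenize("SHOW TABLES LIKE 'foo%'")

    # SHOW is in COMMANDS, so everything up to the statement boundary
    # is swallowed into one STRING token rather than re-tokenized.
    assert tokens[0].token_type == TokenType.SHOW
    assert tokens[1].token_type == TokenType.STRING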
def reset(self) -> None:
    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 0
        self._comments: t.List[str] = []

        self._char = ""
        self._end = False
        self._peek = ""
        self._prev_token_line = -1
    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        if self.use_rs_tokenizer:
            return self.tokenize_rs(sql)

        self.reset()
        self.sql = sql
        self.size = len(sql)

        try:
            self._scan()
        except Exception as e:
            start = max(self._current - 50, 0)
            end = min(self._current + 50, self.size - 1)
            context = self.sql[start:end]
            raise TokenError(f"Error tokenizing '{context}'") from e

        return self.tokens
Returns a list of tokens corresponding to the SQL string `sql`.
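Typical usage (a minimal sketch; TokenError is raised with up to 50 characters of surrounding SQL as context when scanning fails):

    from sqlglot.tokens import Tokenizer
    from sqlglot.errors import TokenError

    tokenizer = Tokenizer()
    try:
        for token in tokenizer.tokenize("SELECT 1"):
            print(token.token_type, repr(token.text))
    except TokenError as e:
        print(f"tokenization failed: {e}")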
    def tokenize_rs(self, sql: str) -> t.List[Token]:
        if not self._RS_TOKENIZER:
            raise SqlglotError("Rust tokenizer is not available")

        tokens, error_msg = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
        for token in tokens:
            token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]

        # Setting this here so partial token lists can be inspected even if there is a failure
        self.tokens = tokens

        if error_msg is not None:
            raise TokenError(error_msg)

        return tokens
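tokenize() transparently dispatches to tokenize_rs() when the sqlglotrs extension is in use; the Rust scanner returns tokens whose types are mapped back to TokenType by index. A sketch of forcing one path or the other (assumes the use_rs_tokenizer constructor flag available in recent sqlglot releases):

    from sqlglot.tokens import Tokenizer

    # Force the pure-Python scanner even if sqlglotrs is installed.
    tokenizer = Tokenizer(use_rs_tokenizer=False)
    tokens = tokenizer.tokenize("SELECT 1")

    # Note: after a TokenError, tokenizer.tokens still holds the
    # partial token list, which both code paths expose for inspection.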