sqlglot.dialects.snowflake — Snowflake dialect implementation.

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25)
  26from sqlglot.helper import flatten, is_float, is_int, seq_get
  27from sqlglot.tokens import TokenType
  28
  29if t.TYPE_CHECKING:
  30    from sqlglot._typing import E
  31
  32
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Build a parser for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP* family.

    Args:
        name: original Snowflake function name, used for the opaque fallback.
        kind: target data type the function converts to.
        safe: True for TRY_* variants; recorded on the produced expression.

    Returns:
        A callable mapping the parsed argument list to an AST node.
    """
    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # True when the first argument is an integer literal (epoch-style input)
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return exp.cast(value, kind)

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value:
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    return build_formatted_time(exp.StrToTime, "snowflake")(args)

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # No special case applied — keep the call opaque under its original name
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  62
  63
  64def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  65    expression = parser.build_var_map(args)
  66
  67    if isinstance(expression, exp.StarMap):
  68        return expression
  69
  70    return exp.Struct(
  71        expressions=[
  72            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  73        ]
  74    )
  75
  76
  77def _build_datediff(args: t.List) -> exp.DateDiff:
  78    return exp.DateDiff(
  79        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  80    )
  81
  82
  83def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  84    def _builder(args: t.List) -> E:
  85        return expr_type(
  86            this=seq_get(args, 2),
  87            expression=seq_get(args, 1),
  88            unit=map_date_part(seq_get(args, 0)),
  89        )
  90
  91    return _builder
  92
  93
  94# https://docs.snowflake.com/en/sql-reference/functions/div0
  95def _build_if_from_div0(args: t.List) -> exp.If:
  96    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
  97    true = exp.Literal.number(0)
  98    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
  99    return exp.If(this=cond, true=true, false=false)
 100
 101
 102# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 103def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 104    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 105    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 106
 107
 108# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 109def _build_if_from_nullifzero(args: t.List) -> exp.If:
 110    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
 111    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 112
 113
 114def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 115    flag = expression.text("flag")
 116
 117    if "i" not in flag:
 118        flag += "i"
 119
 120    return self.func(
 121        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 122    )
 123
 124
 125def _build_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
 126    if len(args) == 3:
 127        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
 128    return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
 129
 130
 131def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 132    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 133
 134    if not regexp_replace.args.get("replacement"):
 135        regexp_replace.set("replacement", exp.Literal.string(""))
 136
 137    return regexp_replace
 138
 139
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Create a SHOW sub-parser with the given SHOW kind bound into the closure."""
    def _parse(self: Snowflake.Parser) -> exp.Show:
        return self._parse_show_snowflake(*args, **kwargs)

    return _parse
 145
 146
 147def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 148    trunc = date_trunc_to_time(args)
 149    trunc.set("unit", map_date_part(trunc.args["unit"]))
 150    return trunc
 151
 152
def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
    """
    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
    so we need to unqualify them.

    Example:
        >>> from sqlglot import parse_one
        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
    """
    # Only UNPIVOT needs this treatment; regular PIVOTs pass through untouched
    if isinstance(expression, exp.Pivot) and expression.unpivot:
        expression = transforms.unqualify_columns(expression)

    return expression
 168
 169
def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
    """Strip inner type parameters from nested column types in CREATE statements.

    Iceberg tables keep their structured (parameterized) types; for all other
    tables the nested types are flattened to their bare outer type.
    """
    assert isinstance(expression, exp.Create)

    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
        # Drop the type parameters, e.g. the element type of an ARRAY
        if expression.this in exp.DataType.NESTED_TYPES:
            expression.set("expressions", None)
        return expression

    props = expression.args.get("properties")
    # Only flatten when this is a schema-defining CREATE without an ICEBERG property
    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
        for schema_expression in expression.this.expressions:
            if isinstance(schema_expression, exp.ColumnDef):
                column_type = schema_expression.kind
                if isinstance(column_type, exp.DataType):
                    column_type.transform(_flatten_structured_type, copy=False)

    return expression
 187
 188
class Snowflake(Dialect):
    """Snowflake SQL dialect."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False

    # Snowflake datetime format tokens -> Python strftime-style equivalents
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 228
 229    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 230        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 231        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 232        if (
 233            isinstance(expression, exp.Identifier)
 234            and isinstance(expression.parent, exp.Table)
 235            and expression.name.lower() == "dual"
 236        ):
 237            return expression  # type: ignore
 238
 239        return super().quote_identifier(expression, identify=identify)
 240
    class Parser(parser.Parser):
        # Pivot aliases are parsed as string literals rather than identifiers
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # `:` performs JSON extraction on semi-structured values
        COLON_IS_JSON_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        # MATCH_CONDITION must not be consumed as a table alias
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 253
        # Snowflake function names -> builders for dialect-agnostic AST nodes
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # Snowflake's argument order is (array, value); the AST's is (value, array)
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN is the 0.5 continuous percentile
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 317
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM uses the generic function-call parse path in this dialect
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> handlers; TERSE variants map to the same underlying kind
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-character tokens that may appear inside a staged-file path
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default column names produced by LATERAL FLATTEN
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN <scope> refers to a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is an identifier, not a table
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Unwrap Cast nodes so only the bare parameter names remain
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 412
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH / MASKING / PROJECTION / TAG column constraints.

            The leading WITH keyword is optional in Snowflake; when the trigger
            token was not WITH, retreat one token so the keyword is re-read by
            the `_match_text_seq` calls below.
            """
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Flatten a qualified policy name into a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            # Not a recognized constraint — let the caller handle it
            return None
 438
 439        def _parse_create(self) -> exp.Create | exp.Command:
 440            expression = super()._parse_create()
 441            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 442                # Replace the Table node with the enclosed Identifier
 443                expression.this.replace(expression.this.this)
 444
 445            return expression
 446
 447        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 448        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 449        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 450            this = self._parse_var() or self._parse_type()
 451
 452            if not this:
 453                return None
 454
 455            self._match(TokenType.COMMA)
 456            expression = self._parse_bitwise()
 457            this = map_date_part(this)
 458            name = this.name.upper()
 459
 460            if name.startswith("EPOCH"):
 461                if name == "EPOCH_MILLISECOND":
 462                    scale = 10**3
 463                elif name == "EPOCH_MICROSECOND":
 464                    scale = 10**6
 465                elif name == "EPOCH_NANOSECOND":
 466                    scale = 10**9
 467                else:
 468                    scale = None
 469
 470                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 471                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 472
 473                if scale:
 474                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 475
 476                return to_unix
 477
 478            return self.expression(exp.Extract, this=this, expression=expression)
 479
 480        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 481            if is_map:
 482                # Keys are strings in Snowflake's objects, see also:
 483                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 484                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 485                return self._parse_slice(self._parse_string())
 486
 487            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 488
        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's default column names.

            When the lateral wraps a FLATTEN (parsed as Explode), Snowflake
            exposes a fixed set of output columns; attach them to the alias
            so downstream consumers can reference them.
            """
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    # Alias exists but has no column list — add the defaults
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # No alias at all — synthesize one with the default columns
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral
 503
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an optional AT/BEFORE time-travel clause onto `table`.

            If the clause does not parse fully (no `=>` expression inside the
            parentheses), the token index is rolled back so nothing is consumed.
            """
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # e.g. TIMESTAMP/OFFSET/STATEMENT/STREAM inside the parentheses
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Incomplete clause — rewind and leave the table untouched
                    self._retreat(index)

            return table
 526
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged-file locations.

            Staged files (string paths or @stage paths) may carry an optional
            parenthesized FILE_FORMAT / PATTERN option list. Any parsed table
            is then checked for a trailing AT/BEFORE time-travel clause.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Consume the optional (FILE_FORMAT => ..., PATTERN => ...) list
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                # Not a staged file — fall back to the standard table parser
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)
 560
 561        def _parse_id_var(
 562            self,
 563            any_token: bool = True,
 564            tokens: t.Optional[t.Collection[TokenType]] = None,
 565        ) -> t.Optional[exp.Expression]:
 566            if self._match_text_seq("IDENTIFIER", "("):
 567                identifier = (
 568                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 569                    or self._parse_string()
 570                )
 571                self._match_r_paren()
 572                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 573
 574            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 575
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW <kind> statement into an exp.Show."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare scope name: infer schema vs table from the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 613
 614        def _parse_alter_table_swap(self) -> exp.SwapTable:
 615            self._match_text_seq("WITH")
 616            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 617
 618        def _parse_location_property(self) -> exp.LocationProperty:
 619            self._match(TokenType.EQ)
 620            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 621
 622        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 623            # Parse either a subquery or a staged file
 624            return (
 625                self._parse_select(table=True, parse_subquery_alias=False)
 626                if self._match(TokenType.L_PAREN, advance=False)
 627                else self._parse_table_parts()
 628            )
 629
        def _parse_location_path(self) -> exp.Var:
            """Consume a raw staged-file path (e.g. @stage/dir/file) into a Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 642
 643        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 644            this = super()._parse_lambda_arg()
 645
 646            if not this:
 647                return this
 648
 649            typ = self._parse_types()
 650
 651            if typ:
 652                return self.expression(exp.Cast, this=this, to=typ)
 653
 654            return this
 655
    class Tokenizer(tokens.Tokenizer):
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        # Snowflake keywords and their token types; mostly aliases of standard types
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a statement here, not treated as an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 696
    class Generator(generator.Generator):
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT(...) as EXCLUDE
        STAR_EXCEPT = "EXCLUDE"
        # Expression-type -> SQL generator overrides; each entry rewrites a
        # sqlglot expression into its Snowflake spelling.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array) -- reversed arguments
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end is exclusive, so add 1 to preserve the
            # inclusive GenerateSeries semantics
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
 803
        # JSON path components expressible with Snowflake's GET_PATH syntax
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Snowflake models nested/struct types as OBJECT
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            # SET/VOLATILE table properties have no Snowflake equivalent
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that cannot appear inside a VALUES (...) clause
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 828
 829        def with_properties(self, properties: exp.Properties) -> str:
 830            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 831
 832        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 833            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 834                values_as_table = False
 835
 836            return super().values_sql(expression, values_as_table=values_as_table)
 837
 838        def datatype_sql(self, expression: exp.DataType) -> str:
 839            expressions = expression.expressions
 840            if (
 841                expressions
 842                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 843                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 844            ):
 845                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 846                return "OBJECT"
 847
 848            return super().datatype_sql(expression)
 849
 850        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 851            return self.func(
 852                "TO_NUMBER",
 853                expression.this,
 854                expression.args.get("format"),
 855                expression.args.get("precision"),
 856                expression.args.get("scale"),
 857            )
 858
 859        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 860            milli = expression.args.get("milli")
 861            if milli is not None:
 862                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 863                expression.set("nano", milli_to_nano)
 864
 865            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 866
 867        def trycast_sql(self, expression: exp.TryCast) -> str:
 868            value = expression.this
 869
 870            if value.type is None:
 871                from sqlglot.optimizer.annotate_types import annotate_types
 872
 873                value = annotate_types(value)
 874
 875            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 876                return super().trycast_sql(expression)
 877
 878            # TRY_CAST only works for string values in Snowflake
 879            return self.cast_sql(expression)
 880
 881        def log_sql(self, expression: exp.Log) -> str:
 882            if not expression.expression:
 883                return self.func("LN", expression.this)
 884
 885            return super().log_sql(expression)
 886
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN produces a fixed six-column output, so a table alias with
            matching column names is synthesized (or merged into the caller's
            alias) to keep column references valid.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An explicit WITH OFFSET alias claims FLATTEN's INDEX slot
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied alias column names the VALUE slot
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 910
 911        def show_sql(self, expression: exp.Show) -> str:
 912            terse = "TERSE " if expression.args.get("terse") else ""
 913            history = " HISTORY" if expression.args.get("history") else ""
 914            like = self.sql(expression, "like")
 915            like = f" LIKE {like}" if like else ""
 916
 917            scope = self.sql(expression, "scope")
 918            scope = f" {scope}" if scope else ""
 919
 920            scope_kind = self.sql(expression, "scope_kind")
 921            if scope_kind:
 922                scope_kind = f" IN {scope_kind}"
 923
 924            starts_with = self.sql(expression, "starts_with")
 925            if starts_with:
 926                starts_with = f" STARTS WITH {starts_with}"
 927
 928            limit = self.sql(expression, "limit")
 929
 930            from_ = self.sql(expression, "from")
 931            if from_:
 932                from_ = f" FROM {from_}"
 933
 934            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 935
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR(subject, pattern[, position[, occurrence[, parameters[, group]]]]).

            Other dialects don't support all of the following parameters, so we need to
            generate default values as necessary to ensure the transpilation is correct.
            """
            # Defaults cascade right-to-left: since the call is positional, any
            # later argument that is present forces a default for every earlier
            # optional argument that is missing.
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
 953
 954        def except_op(self, expression: exp.Except) -> str:
 955            if not expression.args.get("distinct"):
 956                self.unsupported("EXCEPT with All is not supported in Snowflake")
 957            return super().except_op(expression)
 958
 959        def intersect_op(self, expression: exp.Intersect) -> str:
 960            if not expression.args.get("distinct"):
 961                self.unsupported("INTERSECT with All is not supported in Snowflake")
 962            return super().intersect_op(expression)
 963
 964        def describe_sql(self, expression: exp.Describe) -> str:
 965            # Default to table if kind is unknown
 966            kind_value = expression.args.get("kind") or "TABLE"
 967            kind = f" {kind_value}" if kind_value else ""
 968            this = f" {self.sql(expression, 'this')}"
 969            expressions = self.expressions(expression, flat=True)
 970            expressions = f" {expressions}" if expressions else ""
 971            return f"DESCRIBE{kind}{this}{expressions}"
 972
 973        def generatedasidentitycolumnconstraint_sql(
 974            self, expression: exp.GeneratedAsIdentityColumnConstraint
 975        ) -> str:
 976            start = expression.args.get("start")
 977            start = f" START {start}" if start else ""
 978            increment = expression.args.get("increment")
 979            increment = f" INCREMENT {increment}" if increment else ""
 980            return f"AUTOINCREMENT{start}{increment}"
 981
 982        def swaptable_sql(self, expression: exp.SwapTable) -> str:
 983            this = self.sql(expression, "this")
 984            return f"SWAP WITH {this}"
 985
 986        def cluster_sql(self, expression: exp.Cluster) -> str:
 987            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
 988
 989        def struct_sql(self, expression: exp.Struct) -> str:
 990            keys = []
 991            values = []
 992
 993            for i, e in enumerate(expression.expressions):
 994                if isinstance(e, exp.PropertyEQ):
 995                    keys.append(
 996                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
 997                    )
 998                    values.append(e.expression)
 999                else:
1000                    keys.append(exp.Literal.string(f"_{i}"))
1001                    values.append(e)
1002
1003            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1004
1005        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1006            if expression.args.get("weight") or expression.args.get("accuracy"):
1007                self.unsupported(
1008                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
1009                )
1010
1011            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1012
1013        def alterset_sql(self, expression: exp.AlterSet) -> str:
1014            exprs = self.expressions(expression, flat=True)
1015            exprs = f" {exprs}" if exprs else ""
1016            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1017            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1018            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1019            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1020            tag = self.expressions(expression, key="tag", flat=True)
1021            tag = f" TAG {tag}" if tag else ""
1022
1023            return f"SET{exprs}{file_format}{copy_options}{tag}"
class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False

    # Snowflake date-format tokens -> strftime-style directives, used when
    # transpiling format strings between dialects.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 229
 230    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 231        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 232        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 233        if (
 234            isinstance(expression, exp.Identifier)
 235            and isinstance(expression.parent, exp.Table)
 236            and expression.name.lower() == "dual"
 237        ):
 238            return expression  # type: ignore
 239
 240        return super().quote_identifier(expression, identify=identify)
 241
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_JSON_EXTRACT = True  # col:path is JSON extraction in Snowflake

        # MATCH_CONDITION is tokenized as a keyword but stays usable as an identifier
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 254
        # Snowflake function names -> builders for the canonical sqlglot expressions
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # Snowflake's ARRAY_CONTAINS is (value, array); normalize the argument order
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN(x) is modeled as PERCENTILE_CONT(0.5)
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 318
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Parse TRIM as a regular function call rather than the
        # TRIM(<chars> FROM <expr>) special form
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] a, b, c
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }
 359
        # SHOW <object kind> commands we can parse (optionally prefixed by TERSE)
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens allowed inside a staged-file path, e.g. @stage/dir/file.csv
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # FLATTEN's fixed output columns, in order
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW commands whose bare "IN <name>" scope names a schema, not a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is an identifier rather than a table reference
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Lambda parameters may carry casts; strip them down to bare names
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 413
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse Snowflake column constraints reachable via WITH, or the bare
            MASKING POLICY / PROJECTION POLICY / TAG forms (WITH is optional).

            Returns None when none of the recognized constraints follows.
            """
            if self._prev.token_type != TokenType.WITH:
                # Entered directly on MASKING/PROJECTION/TAG (no WITH keyword):
                # step back one token so the keyword can be re-matched below.
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 439
 440        def _parse_create(self) -> exp.Create | exp.Command:
 441            expression = super()._parse_create()
 442            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 443                # Replace the Table node with the enclosed Identifier
 444                expression.this.replace(expression.this.this)
 445
 446            return expression
 447
 448        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 449        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 450        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 451            this = self._parse_var() or self._parse_type()
 452
 453            if not this:
 454                return None
 455
 456            self._match(TokenType.COMMA)
 457            expression = self._parse_bitwise()
 458            this = map_date_part(this)
 459            name = this.name.upper()
 460
 461            if name.startswith("EPOCH"):
 462                if name == "EPOCH_MILLISECOND":
 463                    scale = 10**3
 464                elif name == "EPOCH_MICROSECOND":
 465                    scale = 10**6
 466                elif name == "EPOCH_NANOSECOND":
 467                    scale = 10**9
 468                else:
 469                    scale = None
 470
 471                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 472                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 473
 474                if scale:
 475                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 476
 477                return to_unix
 478
 479            return self.expression(exp.Extract, this=this, expression=expression)
 480
 481        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 482            if is_map:
 483                # Keys are strings in Snowflake's objects, see also:
 484                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 485                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 486                return self._parse_slice(self._parse_string())
 487
 488            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 489
 490        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 491            lateral = super()._parse_lateral()
 492            if not lateral:
 493                return lateral
 494
 495            if isinstance(lateral.this, exp.Explode):
 496                table_alias = lateral.args.get("alias")
 497                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 498                if table_alias and not table_alias.args.get("columns"):
 499                    table_alias.set("columns", columns)
 500                elif not table_alias:
 501                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 502
 503            return lateral
 504
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Attach an AT/BEFORE time-travel clause to *table*, if one follows.

            See https://docs.snowflake.com/en/sql-reference/constructs/at-before
            """
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # Expect "(<kind> => <expr>)", e.g. AT(TIMESTAMP => ...)
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause: rewind and leave the table alone
                    self._retreat(index)

            return table
 527
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            Staged files are either a quoted path string or an @stage path,
            optionally followed by (FILE_FORMAT => ..., PATTERN => ...);
            see https://docs.snowflake.com/en/user-guide/querying-stage
            """
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # The format is either a literal string or a named file format
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            # Either form may carry an AT/BEFORE time-travel clause
            return self._parse_at_before(table)
 561
 562        def _parse_id_var(
 563            self,
 564            any_token: bool = True,
 565            tokens: t.Optional[t.Collection[TokenType]] = None,
 566        ) -> t.Optional[exp.Expression]:
 567            if self._match_text_seq("IDENTIFIER", "("):
 568                identifier = (
 569                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 570                    or self._parse_string()
 571                )
 572                self._match_r_paren()
 573                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 574
 575            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 576
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW <this> command into an exp.Show node."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare "IN <name>": infer schema vs table scope from the command
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 614
 615        def _parse_alter_table_swap(self) -> exp.SwapTable:
 616            self._match_text_seq("WITH")
 617            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 618
 619        def _parse_location_property(self) -> exp.LocationProperty:
 620            self._match(TokenType.EQ)
 621            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 622
 623        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 624            # Parse either a subquery or a staged file
 625            return (
 626                self._parse_select(table=True, parse_subquery_alias=False)
 627                if self._match(TokenType.L_PAREN, advance=False)
 628                else self._parse_table_parts()
 629            )
 630
 631        def _parse_location_path(self) -> exp.Var:
 632            parts = [self._advance_any(ignore_reserved=True)]
 633
 634            # We avoid consuming a comma token because external tables like @foo and @bar
 635            # can be joined in a query with a comma separator, as well as closing paren
 636            # in case of subqueries
 637            while self._is_connected() and not self._match_set(
 638                (TokenType.COMMA, TokenType.R_PAREN), advance=False
 639            ):
 640                parts.append(self._advance_any(ignore_reserved=True))
 641
 642            return exp.var("".join(part.text for part in parts if part))
 643
 644        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 645            this = super()._parse_lambda_arg()
 646
 647            if not this:
 648                return this
 649
 650            typ = self._parse_types()
 651
 652            if typ:
 653                return self.expression(exp.Cast, this=this, to=typ)
 654
 655            return this
 656
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer for Snowflake SQL."""

        # Both a backslash and a doubled single quote escape quotes in strings.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # PUT/REMOVE/RM are stage-file commands; tokenize them as opaque commands.
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces session variables / parameters.
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is fully parsed by this dialect (see Parser.STATEMENT_PARSERS),
        # so it must not be tokenized as an opaque command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 697
    class Generator(generator.Generator):
        """Generator for Snowflake SQL."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes its arguments in (value, array) order.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end is exclusive while GenerateSeries is
            # inclusive, hence the +1 on the end bound.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            # TIMESTAMPDIFF's operand order is reversed relative to TimestampDiff.
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that cannot appear inside a VALUES clause rendered
        # as a table; see values_sql below.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            # Properties are rendered space-separated, without wrapping parens.
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Generate VALUES, falling back to non-table form when the rows
            contain expressions Snowflake does not allow there (maps/structs)."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Generate a data type, degrading typed structs to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            # TO_NUMBER(expr [, format] [, precision] [, scale]); self.func drops
            # trailing None arguments.
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Generate TIMESTAMP_FROM_PARTS, folding a milliseconds argument
            into the nanoseconds slot (1 ms = 1_000_000 ns)."""
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Generate TRY_CAST, downgrading to CAST for non-string operands."""
            value = expression.this

            # Annotate on demand so we know the operand's type.
            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            # Single-argument LOG is the natural logarithm, spelled LN here.
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Generate UNNEST as TABLE(FLATTEN(INPUT => ...)) with an alias
            covering FLATTEN's fixed output columns (seq, key, path, index,
            value, this)."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An explicit offset alias replaces the default "index" column name.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            # NOTE(review): only the first UNNEST operand is flattened here.
            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render an exp.Show node back into a SHOW command."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            # Snowflake only supports set operations with DISTINCT semantics.
            if not expression.args.get("distinct"):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            # Snowflake only supports set operations with DISTINCT semantics.
            if not expression.args.get("distinct"):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            # kind_value is always truthy here, so the conditional below never
            # yields "".
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render GENERATED AS IDENTITY as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, ...).

            Unnamed fields get positional keys "_0", "_1", ...
            """
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            # Snowflake's APPROX_PERCENTILE only takes (expr, quantile).
            if expression.args.get("weight") or expression.args.get("accuracy"):
                self.unsupported(
                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
                )

            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with optional stage file-format, copy-option
            and tag clauses."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
230    def quote_identifier(self, expression: E, identify: bool = True) -> E:
231        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
232        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
233        if (
234            isinstance(expression, exp.Identifier)
235            and isinstance(expression.parent, exp.Table)
236            and expression.name.lower() == "dual"
237        ):
238            return expression  # type: ignore
239
240        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\\n`) to its unescaped version (e.g. an actual newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
242    class Parser(parser.Parser):
243        IDENTIFY_PIVOT_STRINGS = True
244        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
245        COLON_IS_JSON_EXTRACT = True
246
247        ID_VAR_TOKENS = {
248            *parser.Parser.ID_VAR_TOKENS,
249            TokenType.MATCH_CONDITION,
250        }
251
252        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
253        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
254
255        FUNCTIONS = {
256            **parser.Parser.FUNCTIONS,
257            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
258            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
259            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
260            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
261                this=seq_get(args, 1), expression=seq_get(args, 0)
262            ),
263            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
 264            # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
265                start=seq_get(args, 0),
266                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
267                step=seq_get(args, 2),
268            ),
269            "BITXOR": binary_from_function(exp.BitwiseXor),
270            "BIT_XOR": binary_from_function(exp.BitwiseXor),
271            "BOOLXOR": binary_from_function(exp.Xor),
272            "CONVERT_TIMEZONE": _build_convert_timezone,
273            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
274            "DATE_TRUNC": _date_trunc_to_time,
275            "DATEADD": _build_date_time_add(exp.DateAdd),
276            "DATEDIFF": _build_datediff,
277            "DIV0": _build_if_from_div0,
278            "FLATTEN": exp.Explode.from_arg_list,
279            "GET_PATH": lambda args, dialect: exp.JSONExtract(
280                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
281            ),
282            "IFF": exp.If.from_arg_list,
283            "LAST_DAY": lambda args: exp.LastDay(
284                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
285            ),
286            "LISTAGG": exp.GroupConcat.from_arg_list,
287            "MEDIAN": lambda args: exp.PercentileCont(
288                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
289            ),
290            "NULLIFZERO": _build_if_from_nullifzero,
291            "OBJECT_CONSTRUCT": _build_object_construct,
292            "REGEXP_REPLACE": _build_regexp_replace,
293            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
294            "RLIKE": exp.RegexpLike.from_arg_list,
295            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
296            "TIMEADD": _build_date_time_add(exp.TimeAdd),
297            "TIMEDIFF": _build_datediff,
298            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
299            "TIMESTAMPDIFF": _build_datediff,
300            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
301            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
302            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
303            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
304            "TO_NUMBER": lambda args: exp.ToNumber(
305                this=seq_get(args, 0),
306                format=seq_get(args, 1),
307                precision=seq_get(args, 2),
308                scale=seq_get(args, 3),
309            ),
310            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
311            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
312            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
313            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
314            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
315            "TO_VARCHAR": exp.ToChar.from_arg_list,
316            "ZEROIFNULL": _build_if_from_zeroifnull,
317        }
318
        # Snowflake-specific function parsers; DATE_PART and
        # OBJECT_CONSTRUCT_KEEP_NULL require custom parsing logic.
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # NOTE(review): presumably removed so TRIM is parsed as a plain function
        # call rather than through the ANSI TRIM(... FROM ...) parser — confirm.
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is excluded from the timestamp token set for Snowflake.
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        # Snowflake supports LIKE ANY / ILIKE ANY as binary range operators.
        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <ids> is modeled as exp.Set with unset=True.
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        # SHOW is a full statement in Snowflake, handled by _parse_show.
        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # Dispatch table for SHOW <kind>; each TERSE variant reuses the same
        # parser as its non-TERSE counterpart.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # All of these dispatch to _parse_with_constraint, which itself handles
        # the optional leading WITH keyword.
        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-character tokens that may appear inside a staged file path.
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Column names produced by FLATTEN; used to alias exploded laterals.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose bare `IN <name>` scope defaults to SCHEMA (vs TABLE).
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is a bare identifier rather than a table.
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        # Arrow lambdas may have typed parameters; parameters parsed as Casts are
        # unwrapped to their underlying names in the lambda's parameter list.
        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
413
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse Snowflake column constraints introduced by WITH, or by a bare
            MASKING POLICY / PROJECTION POLICY / TAG keyword (all registered in
            CONSTRAINT_PARSERS).

            Returns the parsed constraint expression, or None when none of the
            expected constraint keywords follow.
            """
            # When entered via "MASKING"/"PROJECTION"/"TAG" (no WITH), the trigger
            # keyword has already been consumed by the constraint dispatcher; step
            # back one token so the _match_text_seq calls below can see it.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Normalize a qualified Column into a Dot chain for the policy name
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
439
440        def _parse_create(self) -> exp.Create | exp.Command:
441            expression = super()._parse_create()
442            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
443                # Replace the Table node with the enclosed Identifier
444                expression.this.replace(expression.this.this)
445
446            return expression
447
448        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
449        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
450        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
451            this = self._parse_var() or self._parse_type()
452
453            if not this:
454                return None
455
456            self._match(TokenType.COMMA)
457            expression = self._parse_bitwise()
458            this = map_date_part(this)
459            name = this.name.upper()
460
461            if name.startswith("EPOCH"):
462                if name == "EPOCH_MILLISECOND":
463                    scale = 10**3
464                elif name == "EPOCH_MICROSECOND":
465                    scale = 10**6
466                elif name == "EPOCH_NANOSECOND":
467                    scale = 10**9
468                else:
469                    scale = None
470
471                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
472                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
473
474                if scale:
475                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
476
477                return to_unix
478
479            return self.expression(exp.Extract, this=this, expression=expression)
480
481        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
482            if is_map:
483                # Keys are strings in Snowflake's objects, see also:
484                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
485                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
486                return self._parse_slice(self._parse_string())
487
488            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
489
490        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
491            lateral = super()._parse_lateral()
492            if not lateral:
493                return lateral
494
495            if isinstance(lateral.this, exp.Explode):
496                table_alias = lateral.args.get("alias")
497                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
498                if table_alias and not table_alias.args.get("columns"):
499                    table_alias.set("columns", columns)
500                elif not table_alias:
501                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
502
503            return lateral
504
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Attach a Snowflake time-travel clause (AT | BEFORE) to ``table``.

            https://docs.snowflake.com/en/sql-reference/constructs/at-before

            If a full `AT|BEFORE (<kind> => <expr>)` clause is present, a
            HistoricalData node is stored under the table's "when" arg;
            otherwise the token position is rolled back and the table is
            returned unchanged.
            """
            # Remember the starting position so a partial match can be undone.
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # kind is a member of HISTORICAL_DATA_KIND, or a falsy value when
                # the parenthesized form does not follow.
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Incomplete clause: rewind so the tokens can be re-parsed.
                    self._retreat(index)

            return table
527
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, additionally supporting Snowflake staged
            file locations — string paths and @stage paths — optionally followed
            by `(FILE_FORMAT => ..., PATTERN => ...)` options, and finally an
            AT|BEFORE time-travel clause.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options after a staged file location.
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string literal or a named file
                        # format referenced like a table.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                # Not a staged file: fall back to the standard table parser.
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)
561
562        def _parse_id_var(
563            self,
564            any_token: bool = True,
565            tokens: t.Optional[t.Collection[TokenType]] = None,
566        ) -> t.Optional[exp.Expression]:
567            if self._match_text_seq("IDENTIFIER", "("):
568                identifier = (
569                    super()._parse_id_var(any_token=any_token, tokens=tokens)
570                    or self._parse_string()
571                )
572                self._match_r_paren()
573                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
574
575            return super()._parse_id_var(any_token=any_token, tokens=tokens)
576
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a Snowflake SHOW command into an exp.Show.

            ``this`` is the object kind being shown (e.g. "TABLES", "SCHEMAS"),
            as registered in SHOW_PARSERS.
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit scope keyword: infer SCHEMA vs TABLE scope from
                    # the kind of object being shown.
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
614
615        def _parse_alter_table_swap(self) -> exp.SwapTable:
616            self._match_text_seq("WITH")
617            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
618
619        def _parse_location_property(self) -> exp.LocationProperty:
620            self._match(TokenType.EQ)
621            return self.expression(exp.LocationProperty, this=self._parse_location_path())
622
623        def _parse_file_location(self) -> t.Optional[exp.Expression]:
624            # Parse either a subquery or a staged file
625            return (
626                self._parse_select(table=True, parse_subquery_alias=False)
627                if self._match(TokenType.L_PAREN, advance=False)
628                else self._parse_table_parts()
629            )
630
631        def _parse_location_path(self) -> exp.Var:
632            parts = [self._advance_any(ignore_reserved=True)]
633
634            # We avoid consuming a comma token because external tables like @foo and @bar
635            # can be joined in a query with a comma separator, as well as closing paren
636            # in case of subqueries
637            while self._is_connected() and not self._match_set(
638                (TokenType.COMMA, TokenType.R_PAREN), advance=False
639            ):
640                parts.append(self._advance_any(ignore_reserved=True))
641
642            return exp.var("".join(part.text for part in parts if part))
643
644        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
645            this = super()._parse_lambda_arg()
646
647            if not this:
648                return this
649
650            typ = self._parse_types()
651
652            if typ:
653                return self.expression(exp.Cast, this=this, to=typ)
654
655            return this

The Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_JSON_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.SERIAL: 'SERIAL'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.MAP: 'MAP'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.NESTED: 'NESTED'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.TIME: 'TIME'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.SUPER: 'SUPER'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.VIEW: 'VIEW'>, <TokenType.FALSE: 'FALSE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.FIRST: 'FIRST'>, <TokenType.JSONB: 'JSONB'>, <TokenType.NULL: 'NULL'>, <TokenType.UINT: 'UINT'>, <TokenType.INET: 'INET'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.USE: 'USE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.INT: 'INT'>, <TokenType.IPV6: 'IPV6'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.CASE: 'CASE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.DESC: 'DESC'>, 
<TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.KILL: 'KILL'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.FINAL: 'FINAL'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.DELETE: 'DELETE'>, <TokenType.VAR: 'VAR'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TEXT: 'TEXT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.ROW: 'ROW'>, <TokenType.NAME: 'NAME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.TRUE: 'TRUE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.ROWS: 'ROWS'>, <TokenType.XML: 'XML'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.ANY: 'ANY'>, <TokenType.MONEY: 'MONEY'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.LIST: 'LIST'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.DATE: 'DATE'>, <TokenType.INT256: 'INT256'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TOP: 'TOP'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, 
<TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.FULL: 'FULL'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ANTI: 'ANTI'>, <TokenType.COPY: 'COPY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.YEAR: 'YEAR'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.RANGE: 'RANGE'>, <TokenType.INT128: 'INT128'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ASC: 'ASC'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.ASOF: 'ASOF'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.SEMI: 'SEMI'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.TAG: 'TAG'>, <TokenType.SOME: 'SOME'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.MODEL: 'MODEL'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.IPV4: 'IPV4'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.APPLY: 'APPLY'>, <TokenType.BINARY: 'BINARY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, 
<TokenType.SET: 'SET'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.END: 'END'>, <TokenType.CHAR: 'CHAR'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.LEFT: 'LEFT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.UUID: 'UUID'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.IS: 'IS'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>}
TABLE_ALIAS_TOKENS = {<TokenType.SERIAL: 'SERIAL'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.MAP: 'MAP'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.NESTED: 'NESTED'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.TIME: 'TIME'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.SUPER: 'SUPER'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.VIEW: 'VIEW'>, <TokenType.FALSE: 'FALSE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.FIRST: 'FIRST'>, <TokenType.NULL: 'NULL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.UINT: 'UINT'>, <TokenType.INET: 'INET'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.USE: 'USE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.INT: 'INT'>, <TokenType.IPV6: 'IPV6'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.CASE: 'CASE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.DESC: 'DESC'>, 
<TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.KILL: 'KILL'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.FINAL: 'FINAL'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.DELETE: 'DELETE'>, <TokenType.VAR: 'VAR'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TEXT: 'TEXT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.ROW: 'ROW'>, <TokenType.NAME: 'NAME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.TRUE: 'TRUE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.ROWS: 'ROWS'>, <TokenType.XML: 'XML'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.ANY: 'ANY'>, <TokenType.MONEY: 'MONEY'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.LIST: 'LIST'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.INT256: 'INT256'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TOP: 'TOP'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, 
<TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ANTI: 'ANTI'>, <TokenType.COPY: 'COPY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.YEAR: 'YEAR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.RANGE: 'RANGE'>, <TokenType.INT128: 'INT128'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ASC: 'ASC'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.SEMI: 'SEMI'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.TAG: 'TAG'>, <TokenType.SOME: 'SOME'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.MODEL: 'MODEL'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.IPV4: 'IPV4'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.BINARY: 'BINARY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.SET: 'SET'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.VOLATILE: 
'VOLATILE'>, <TokenType.END: 'END'>, <TokenType.CHAR: 'CHAR'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.UUID: 'UUID'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.IS: 'IS'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.DATE: 'DATE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 
'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateDateArray'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function 
Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _build_convert_timezone>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'MEDIAN': <function Snowflake.Parser.<lambda>>, 'NULLIFZERO': 
<function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': 
<function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'OBJECTS', 'IMPORTED KEYS', 'VIEWS', 'TABLES', 'UNIQUE KEYS', 'SEQUENCES'}
NON_TABLE_CREATABLES = {'TAG', 'WAREHOUSE', 'STORAGE INTEGRATION', 'STREAMLIT'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
class Tokenizer(tokens.Tokenizer):
    # Snowflake accepts both a backslash and a doubled single quote as
    # escapes inside string literals.
    STRING_ESCAPES = ["\\", "'"]
    HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    # $$ ... $$ delimits raw (dollar-quoted) strings.
    RAW_STRINGS = ["$$"]
    COMMENTS = ["--", "//", ("/*", "*/")]

    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "BYTEINT": TokenType.INT,
        "CHAR VARYING": TokenType.VARCHAR,
        "CHARACTER VARYING": TokenType.VARCHAR,
        "EXCLUDE": TokenType.EXCEPT,
        "ILIKE ANY": TokenType.ILIKE_ANY,
        "LIKE ANY": TokenType.LIKE_ANY,
        "MATCH_CONDITION": TokenType.MATCH_CONDITION,
        "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
        "MINUS": TokenType.EXCEPT,
        "NCHAR VARYING": TokenType.VARCHAR,
        # Stage file commands are tokenized as opaque COMMANDs.
        "PUT": TokenType.COMMAND,
        "REMOVE": TokenType.COMMAND,
        "RM": TokenType.COMMAND,
        "SAMPLE": TokenType.TABLE_SAMPLE,
        "SQL_DOUBLE": TokenType.DOUBLE,
        "SQL_VARCHAR": TokenType.VARCHAR,
        "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
        "TAG": TokenType.TAG,
        "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
        "TOP": TokenType.TOP,
        "WAREHOUSE": TokenType.WAREHOUSE,
        "STREAMLIT": TokenType.STREAMLIT,
    }

    SINGLE_TOKENS = {
        **tokens.Tokenizer.SINGLE_TOKENS,
        # "$" introduces parameters such as $1 and may appear in identifiers.
        "$": TokenType.PARAMETER,
    }

    VAR_SINGLE_TOKENS = {"$"}

    # SHOW is parsed as a real statement rather than an opaque command.
    COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 
'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 
'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': 
<TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 
'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 
'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.FETCH: 'FETCH'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 698    class Generator(generator.Generator):
 699        PARAMETER_TOKEN = "$"
 700        MATCHED_BY_SOURCE = False
 701        SINGLE_STRING_INTERVAL = True
 702        JOIN_HINTS = False
 703        TABLE_HINTS = False
 704        QUERY_HINTS = False
 705        AGGREGATE_FILTER_SUPPORTED = False
 706        SUPPORTS_TABLE_COPY = False
 707        COLLATE_IS_FUNC = True
 708        LIMIT_ONLY_LITERALS = True
 709        JSON_KEY_VALUE_PAIR_SEP = ","
 710        INSERT_OVERWRITE = " OVERWRITE INTO"
 711        STRUCT_DELIMITER = ("(", ")")
 712        COPY_PARAMS_ARE_WRAPPED = False
 713        COPY_PARAMS_EQ_REQUIRED = True
 714        STAR_EXCEPT = "EXCLUDE"
 715
 716        TRANSFORMS = {
 717            **generator.Generator.TRANSFORMS,
 718            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
 719            exp.ArgMax: rename_func("MAX_BY"),
 720            exp.ArgMin: rename_func("MIN_BY"),
 721            exp.Array: inline_array_sql,
 722            exp.ArrayConcat: rename_func("ARRAY_CAT"),
 723            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
 724            exp.AtTimeZone: lambda self, e: self.func(
 725                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
 726            ),
 727            exp.BitwiseXor: rename_func("BITXOR"),
 728            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
 729            exp.DateAdd: date_delta_sql("DATEADD"),
 730            exp.DateDiff: date_delta_sql("DATEDIFF"),
 731            exp.DateStrToDate: datestrtodate_sql,
 732            exp.DayOfMonth: rename_func("DAYOFMONTH"),
 733            exp.DayOfWeek: rename_func("DAYOFWEEK"),
 734            exp.DayOfYear: rename_func("DAYOFYEAR"),
 735            exp.Explode: rename_func("FLATTEN"),
 736            exp.Extract: rename_func("DATE_PART"),
 737            exp.FromTimeZone: lambda self, e: self.func(
 738                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
 739            ),
 740            exp.GenerateSeries: lambda self, e: self.func(
 741                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
 742            ),
 743            exp.GroupConcat: rename_func("LISTAGG"),
 744            exp.If: if_sql(name="IFF", false_value="NULL"),
 745            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
 746            exp.JSONExtractScalar: lambda self, e: self.func(
 747                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
 748            ),
 749            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
 750            exp.JSONPathRoot: lambda *_: "",
 751            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
 752            exp.LogicalOr: rename_func("BOOLOR_AGG"),
 753            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 754            exp.Max: max_or_greatest,
 755            exp.Min: min_or_least,
 756            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
 757            exp.PercentileCont: transforms.preprocess(
 758                [transforms.add_within_group_for_percentiles]
 759            ),
 760            exp.PercentileDisc: transforms.preprocess(
 761                [transforms.add_within_group_for_percentiles]
 762            ),
 763            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
 764            exp.RegexpILike: _regexpilike_sql,
 765            exp.Rand: rename_func("RANDOM"),
 766            exp.Select: transforms.preprocess(
 767                [
 768                    transforms.eliminate_distinct_on,
 769                    transforms.explode_to_unnest(),
 770                    transforms.eliminate_semi_and_anti_joins,
 771                ]
 772            ),
 773            exp.SHA: rename_func("SHA1"),
 774            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
 775            exp.StartsWith: rename_func("STARTSWITH"),
 776            exp.StrPosition: lambda self, e: self.func(
 777                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
 778            ),
 779            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
 780            exp.Stuff: rename_func("INSERT"),
 781            exp.TimeAdd: date_delta_sql("TIMEADD"),
 782            exp.TimestampDiff: lambda self, e: self.func(
 783                "TIMESTAMPDIFF", e.unit, e.expression, e.this
 784            ),
 785            exp.TimestampTrunc: timestamptrunc_sql(),
 786            exp.TimeStrToTime: timestrtotime_sql,
 787            exp.TimeToStr: lambda self, e: self.func(
 788                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
 789            ),
 790            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
 791            exp.ToArray: rename_func("TO_ARRAY"),
 792            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
 793            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
 794            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
 795            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
 796            exp.TsOrDsToDate: lambda self, e: self.func(
 797                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
 798            ),
 799            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
 800            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 801            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
 802            exp.Xor: rename_func("BOOLXOR"),
 803        }
 804
 805        SUPPORTED_JSON_PATH_PARTS = {
 806            exp.JSONPathKey,
 807            exp.JSONPathRoot,
 808            exp.JSONPathSubscript,
 809        }
 810
 811        TYPE_MAPPING = {
 812            **generator.Generator.TYPE_MAPPING,
 813            exp.DataType.Type.NESTED: "OBJECT",
 814            exp.DataType.Type.STRUCT: "OBJECT",
 815        }
 816
 817        PROPERTIES_LOCATION = {
 818            **generator.Generator.PROPERTIES_LOCATION,
 819            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
 820            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
 821        }
 822
 823        UNSUPPORTED_VALUES_EXPRESSIONS = {
 824            exp.Map,
 825            exp.StarMap,
 826            exp.Struct,
 827            exp.VarMap,
 828        }
 829
 830        def with_properties(self, properties: exp.Properties) -> str:
 831            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 832
 833        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 834            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 835                values_as_table = False
 836
 837            return super().values_sql(expression, values_as_table=values_as_table)
 838
 839        def datatype_sql(self, expression: exp.DataType) -> str:
 840            expressions = expression.expressions
 841            if (
 842                expressions
 843                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 844                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 845            ):
 846                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 847                return "OBJECT"
 848
 849            return super().datatype_sql(expression)
 850
 851        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 852            return self.func(
 853                "TO_NUMBER",
 854                expression.this,
 855                expression.args.get("format"),
 856                expression.args.get("precision"),
 857                expression.args.get("scale"),
 858            )
 859
 860        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 861            milli = expression.args.get("milli")
 862            if milli is not None:
 863                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 864                expression.set("nano", milli_to_nano)
 865
 866            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 867
 868        def trycast_sql(self, expression: exp.TryCast) -> str:
 869            value = expression.this
 870
 871            if value.type is None:
 872                from sqlglot.optimizer.annotate_types import annotate_types
 873
 874                value = annotate_types(value)
 875
 876            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 877                return super().trycast_sql(expression)
 878
 879            # TRY_CAST only works for string values in Snowflake
 880            return self.cast_sql(expression)
 881
 882        def log_sql(self, expression: exp.Log) -> str:
 883            if not expression.expression:
 884                return self.func("LN", expression.this)
 885
 886            return super().log_sql(expression)
 887
 888        def unnest_sql(self, expression: exp.Unnest) -> str:
 889            unnest_alias = expression.args.get("alias")
 890            offset = expression.args.get("offset")
 891
 892            columns = [
 893                exp.to_identifier("seq"),
 894                exp.to_identifier("key"),
 895                exp.to_identifier("path"),
 896                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 897                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 898                or exp.to_identifier("value"),
 899                exp.to_identifier("this"),
 900            ]
 901
 902            if unnest_alias:
 903                unnest_alias.set("columns", columns)
 904            else:
 905                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 906
 907            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 908            alias = self.sql(unnest_alias)
 909            alias = f" AS {alias}" if alias else ""
 910            return f"{explode}{alias}"
 911
 912        def show_sql(self, expression: exp.Show) -> str:
 913            terse = "TERSE " if expression.args.get("terse") else ""
 914            history = " HISTORY" if expression.args.get("history") else ""
 915            like = self.sql(expression, "like")
 916            like = f" LIKE {like}" if like else ""
 917
 918            scope = self.sql(expression, "scope")
 919            scope = f" {scope}" if scope else ""
 920
 921            scope_kind = self.sql(expression, "scope_kind")
 922            if scope_kind:
 923                scope_kind = f" IN {scope_kind}"
 924
 925            starts_with = self.sql(expression, "starts_with")
 926            if starts_with:
 927                starts_with = f" STARTS WITH {starts_with}"
 928
 929            limit = self.sql(expression, "limit")
 930
 931            from_ = self.sql(expression, "from")
 932            if from_:
 933                from_ = f" FROM {from_}"
 934
 935            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 936
 937        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 938            # Other dialects don't support all of the following parameters, so we need to
 939            # generate default values as necessary to ensure the transpilation is correct
 940            group = expression.args.get("group")
 941            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 942            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 943            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 944
 945            return self.func(
 946                "REGEXP_SUBSTR",
 947                expression.this,
 948                expression.expression,
 949                position,
 950                occurrence,
 951                parameters,
 952                group,
 953            )
 954
 955        def except_op(self, expression: exp.Except) -> str:
 956            if not expression.args.get("distinct"):
 957                self.unsupported("EXCEPT with All is not supported in Snowflake")
 958            return super().except_op(expression)
 959
 960        def intersect_op(self, expression: exp.Intersect) -> str:
 961            if not expression.args.get("distinct"):
 962                self.unsupported("INTERSECT with All is not supported in Snowflake")
 963            return super().intersect_op(expression)
 964
 965        def describe_sql(self, expression: exp.Describe) -> str:
 966            # Default to table if kind is unknown
 967            kind_value = expression.args.get("kind") or "TABLE"
 968            kind = f" {kind_value}" if kind_value else ""
 969            this = f" {self.sql(expression, 'this')}"
 970            expressions = self.expressions(expression, flat=True)
 971            expressions = f" {expressions}" if expressions else ""
 972            return f"DESCRIBE{kind}{this}{expressions}"
 973
 974        def generatedasidentitycolumnconstraint_sql(
 975            self, expression: exp.GeneratedAsIdentityColumnConstraint
 976        ) -> str:
 977            start = expression.args.get("start")
 978            start = f" START {start}" if start else ""
 979            increment = expression.args.get("increment")
 980            increment = f" INCREMENT {increment}" if increment else ""
 981            return f"AUTOINCREMENT{start}{increment}"
 982
 983        def swaptable_sql(self, expression: exp.SwapTable) -> str:
 984            this = self.sql(expression, "this")
 985            return f"SWAP WITH {this}"
 986
 987        def cluster_sql(self, expression: exp.Cluster) -> str:
 988            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
 989
 990        def struct_sql(self, expression: exp.Struct) -> str:
 991            keys = []
 992            values = []
 993
 994            for i, e in enumerate(expression.expressions):
 995                if isinstance(e, exp.PropertyEQ):
 996                    keys.append(
 997                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
 998                    )
 999                    values.append(e.expression)
1000                else:
1001                    keys.append(exp.Literal.string(f"_{i}"))
1002                    values.append(e)
1003
1004            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1005
1006        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1007            if expression.args.get("weight") or expression.args.get("accuracy"):
1008                self.unsupported(
1009                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
1010                )
1011
1012            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1013
1014        def alterset_sql(self, expression: exp.AlterSet) -> str:
1015            exprs = self.expressions(expression, flat=True)
1016            exprs = f" {exprs}" if exprs else ""
1017            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1018            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1019            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1020            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1021            tag = self.expressions(expression, key="tag", flat=True)
1022            tag = f" TAG {tag}" if tag else ""
1023
1024            return f"SET{exprs}{file_format}{copy_options}{tag}"

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 
'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function 
preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: 
<Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: 
<Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 
'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.Map'>}
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
830        def with_properties(self, properties: exp.Properties) -> str:
831            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
def values_sql( self, expression: sqlglot.expressions.Values, values_as_table: bool = True) -> str:
833        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
834            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
835                values_as_table = False
836
837            return super().values_sql(expression, values_as_table=values_as_table)
def datatype_sql(self, expression: sqlglot.expressions.DataType) -> str:
839        def datatype_sql(self, expression: exp.DataType) -> str:
840            expressions = expression.expressions
841            if (
842                expressions
843                and expression.is_type(*exp.DataType.STRUCT_TYPES)
844                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
845            ):