Source listing: sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, jsonpath, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    build_timetostr_or_tochar,
  10    binary_from_function,
  11    build_default_decimal_type,
  12    build_timestamp_from_parts,
  13    date_delta_sql,
  14    date_trunc_to_time,
  15    datestrtodate_sql,
  16    build_formatted_time,
  17    if_sql,
  18    inline_array_sql,
  19    max_or_greatest,
  20    min_or_least,
  21    rename_func,
  22    timestamptrunc_sql,
  23    timestrtotime_sql,
  24    var_map_sql,
  25    map_date_part,
  26    no_timestamp_sql,
  27    strposition_sql,
  28    timestampdiff_sql,
  29    no_make_interval_sql,
  30    groupconcat_sql,
  31)
  32from sqlglot.generator import unsupported_args
  33from sqlglot.helper import flatten, is_float, is_int, seq_get
  34from sqlglot.tokens import TokenType
  35
  36if t.TYPE_CHECKING:
  37    from sqlglot._typing import E, B
  38
  39
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a parser for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP family.

    Args:
        name: Function name used for the `exp.Anonymous` fallback when the call
            cannot be mapped to a more specific node.
        kind: The data type the Snowflake function converts its argument to.
        safe: True for the TRY_* variants, which return NULL on conversion failure.

    Returns:
        A builder that maps the raw argument list to a sqlglot expression.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        scale_or_fmt = seq_get(args, 1)

        int_value = value is not None and is_int(value.name)
        int_scale_or_fmt = scale_or_fmt is not None and scale_or_fmt.is_int

        if isinstance(value, exp.Literal) or (value and scale_or_fmt):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if not safe and (int_value or int_scale_or_fmt):
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=scale_or_fmt)
                if not int_scale_or_fmt and not is_float(value.name):
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind in (exp.DataType.Type.DATE, exp.DataType.Type.TIME) and not int_value:
            klass = exp.TsOrDsToDate if kind == exp.DataType.Type.DATE else exp.TsOrDsToTime
            formatted_exp = build_formatted_time(klass, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Unrecognized argument shapes fall back to an anonymous function call
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  81
  82
  83def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  84    expression = parser.build_var_map(args)
  85
  86    if isinstance(expression, exp.StarMap):
  87        return expression
  88
  89    return exp.Struct(
  90        expressions=[
  91            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  92        ]
  93    )
  94
  95
  96def _build_datediff(args: t.List) -> exp.DateDiff:
  97    return exp.DateDiff(
  98        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  99    )
 100
 101
 102def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 103    def _builder(args: t.List) -> E:
 104        return expr_type(
 105            this=seq_get(args, 2),
 106            expression=seq_get(args, 1),
 107            unit=map_date_part(seq_get(args, 0)),
 108        )
 109
 110    return _builder
 111
 112
 113def _build_bitwise(expr_type: t.Type[B], name: str) -> t.Callable[[t.List], B | exp.Anonymous]:
 114    def _builder(args: t.List) -> B | exp.Anonymous:
 115        if len(args) == 3:
 116            return exp.Anonymous(this=name, expressions=args)
 117
 118        return binary_from_function(expr_type)(args)
 119
 120    return _builder
 121
 122
 123# https://docs.snowflake.com/en/sql-reference/functions/div0
 124def _build_if_from_div0(args: t.List) -> exp.If:
 125    lhs = exp._wrap(seq_get(args, 0), exp.Binary)
 126    rhs = exp._wrap(seq_get(args, 1), exp.Binary)
 127
 128    cond = exp.EQ(this=rhs, expression=exp.Literal.number(0)).and_(
 129        exp.Is(this=lhs, expression=exp.null()).not_()
 130    )
 131    true = exp.Literal.number(0)
 132    false = exp.Div(this=lhs, expression=rhs)
 133    return exp.If(this=cond, true=true, false=false)
 134
 135
 136# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 137def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 138    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 139    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 140
 141
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    """Transpile NULLIFZERO(x) as IF(x = 0, NULL, x)."""
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 146
 147
 148def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 149    flag = expression.text("flag")
 150
 151    if "i" not in flag:
 152        flag += "i"
 153
 154    return self.func(
 155        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 156    )
 157
 158
 159def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 160    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 161
 162    if not regexp_replace.args.get("replacement"):
 163        regexp_replace.set("replacement", exp.Literal.string(""))
 164
 165    return regexp_replace
 166
 167
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Build a SHOW_PARSERS entry that forwards its arguments to `_parse_show_snowflake`."""

    def _parse(self: Snowflake.Parser) -> exp.Show:
        return self._parse_show_snowflake(*args, **kwargs)

    return _parse
 173
 174
 175def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 176    trunc = date_trunc_to_time(args)
 177    trunc.set("unit", map_date_part(trunc.args["unit"]))
 178    return trunc
 179
 180
def _unqualify_pivot_columns(expression: exp.Expression) -> exp.Expression:
    """
    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
    so we need to unqualify them. Same goes for ANY ORDER BY <column>.

    Example:
        >>> from sqlglot import parse_one
        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
        >>> print(_unqualify_pivot_columns(expr).sql(dialect="snowflake"))
        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
    """
    if isinstance(expression, exp.Pivot):
        if expression.unpivot:
            expression = transforms.unqualify_columns(expression)
        else:
            # PIVOT: only the `ANY [ORDER BY ...]` field expressions get unqualified
            for field in expression.fields:
                field_expr = seq_get(field.expressions if field else [], 0)

                if isinstance(field_expr, exp.PivotAny):
                    unqualified_field_expr = transforms.unqualify_columns(field_expr)
                    # Replace in place at index 0 of the field's expression list
                    t.cast(exp.Expression, field).set("expressions", unqualified_field_expr, 0)

    return expression
 204
 205
 206def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 207    assert isinstance(expression, exp.Create)
 208
 209    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 210        if expression.this in exp.DataType.NESTED_TYPES:
 211            expression.set("expressions", None)
 212        return expression
 213
 214    props = expression.args.get("properties")
 215    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 216        for schema_expression in expression.this.expressions:
 217            if isinstance(schema_expression, exp.ColumnDef):
 218                column_type = schema_expression.kind
 219                if isinstance(column_type, exp.DataType):
 220                    column_type.transform(_flatten_structured_type, copy=False)
 221
 222    return expression
 223
 224
def _unnest_generate_date_array(unnest: exp.Unnest) -> None:
    """Rewrite UNNEST(GENERATE_DATE_ARRAY(...)) in place for Snowflake.

    The result is a subquery that projects DATEADD(start, seq) over an
    ARRAY_GENERATE_RANGE-produced integer sequence. Only ranges with both
    endpoints and a step of exactly 1 unit are handled; others are left as is.
    """
    generate_date_array = unnest.expressions[0]
    start = generate_date_array.args.get("start")
    end = generate_date_array.args.get("end")
    step = generate_date_array.args.get("step")

    if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
        return

    unit = step.args.get("unit")

    unnest_alias = unnest.args.get("alias")
    if unnest_alias:
        # Copy so the original alias survives on the inner UNNEST we build below
        unnest_alias = unnest_alias.copy()
        sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
    else:
        sequence_value_name = "value"

    # We'll add the next sequence value to the starting date and project the result
    date_add = _build_date_time_add(exp.DateAdd)(
        [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
    ).as_(sequence_value_name)

    # We use DATEDIFF to compute the number of sequence values needed
    number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
        [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
    )

    unnest.set("expressions", [number_sequence])
    unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))
 255
 256
def _transform_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite every GENERATE_DATE_ARRAY in a SELECT into Snowflake-compatible SQL."""
    if isinstance(expression, exp.Select):
        for generate_date_array in expression.find_all(exp.GenerateDateArray):
            parent = generate_date_array.parent

            # If GENERATE_DATE_ARRAY is used directly as an array (e.g passed into ARRAY_LENGTH), the transformed Snowflake
            # query is the following (it'll be unnested properly on the next iteration due to copy):
            # SELECT ref(GENERATE_DATE_ARRAY(...)) -> SELECT ref((SELECT ARRAY_AGG(*) FROM UNNEST(GENERATE_DATE_ARRAY(...))))
            if not isinstance(parent, exp.Unnest):
                unnest = exp.Unnest(expressions=[generate_date_array.copy()])
                generate_date_array.replace(
                    exp.select(exp.ArrayAgg(this=exp.Star())).from_(unnest).subquery()
                )

            # Only rewrite single-expression UNNESTs that appear in FROM/JOIN position
            if (
                isinstance(parent, exp.Unnest)
                and isinstance(parent.parent, (exp.From, exp.Join))
                and len(parent.expressions) == 1
            ):
                _unnest_generate_date_array(parent)

    return expression
 279
 280
 281def _build_regexp_extract(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 282    def _builder(args: t.List) -> E:
 283        return expr_type(
 284            this=seq_get(args, 0),
 285            expression=seq_get(args, 1),
 286            position=seq_get(args, 2),
 287            occurrence=seq_get(args, 3),
 288            parameters=seq_get(args, 4),
 289            group=seq_get(args, 5) or exp.Literal.number(0),
 290        )
 291
 292    return _builder
 293
 294
def _regexpextract_sql(self: Snowflake.Generator, expression: exp.RegexpExtract | exp.RegexpExtractAll) -> str:
    """Render RegexpExtract / RegexpExtractAll as REGEXP_SUBSTR / REGEXP_EXTRACT_ALL."""
    # Other dialects don't support all of the following parameters, so we need to
    # generate default values as necessary to ensure the transpilation is correct
    group = expression.args.get("group")

    # To avoid generating all these default values, we set group to None if
    # it's 0 (also default value) which doesn't trigger the following chain
    if group and group.name == "0":
        group = None

    # Each earlier positional argument is only required if a later one is present
    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR" if isinstance(expression, exp.RegexpExtract) else "REGEXP_EXTRACT_ALL",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
 318
 319
 320def _json_extract_value_array_sql(
 321    self: Snowflake.Generator, expression: exp.JSONValueArray | exp.JSONExtractArray
 322) -> str:
 323    json_extract = exp.JSONExtract(this=expression.this, expression=expression.expression)
 324    ident = exp.to_identifier("x")
 325
 326    if isinstance(expression, exp.JSONValueArray):
 327        this: exp.Expression = exp.cast(ident, to=exp.DataType.Type.VARCHAR)
 328    else:
 329        this = exp.ParseJSON(this=f"TO_JSON({ident})")
 330
 331    transform_lambda = exp.Lambda(expressions=[ident], this=this)
 332
 333    return self.func("TRANSFORM", json_extract, transform_lambda)
 334
 335
class Snowflake(Dialect):
    """Dialect definition for Snowflake."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake time-format tokens mapped to strftime-style tokens
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF6": "%f",
        "ff6": "%f",
    }
 374
 375    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 376        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 377        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 378        if (
 379            isinstance(expression, exp.Identifier)
 380            and isinstance(expression.parent, exp.Table)
 381            and expression.name.lower() == "dual"
 382        ):
 383            return expression  # type: ignore
 384
 385        return super().quote_identifier(expression, identify=identify)
 386
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # Snowflake JSON paths have no leading "$", so drop it from the token set
        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("$")
 390
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        # MATCH_CONDITION is a valid identifier in Snowflake
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # WINDOW may alias a table, but MATCH_CONDITION may not
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        COLON_PLACEHOLDER_TOKENS = ID_VAR_TOKENS | {TokenType.NUMBER}
 405
        # Snowflake function names mapped to sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "HEX_DECODE_BINARY": exp.Unhex.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # Snowflake LEN/LENGTH count bytes for binary input
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_CHAR": build_timetostr_or_tochar,
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 486
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
            "LISTAGG": lambda self: self._parse_string_agg(),
        }
        # TRIM is parsed as a regular function in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is not a timestamp-producing token in Snowflake
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }
 502
        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] col1, col2, ...
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.GET: lambda self: self._parse_get(),
            TokenType.PUT: lambda self: self._parse_put(),
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "CREDENTIALS": lambda self: self._parse_credentials_property(),
            "FILE_FORMAT": lambda self: self._parse_file_format_property(),
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
            "USING": lambda self: self._match_text_seq("TEMPLATE")
            and self.expression(exp.UsingTemplateProperty, this=self._parse_statement()),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }
 534
        # SHOW <object kind> statements supported by _parse_show_snowflake
        SHOW_PARSERS = {
            "DATABASES": _show_parser("DATABASES"),
            "TERSE DATABASES": _show_parser("DATABASES"),
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "STAGES": _show_parser("STAGES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
            "FILE FORMATS": _show_parser("FILE FORMATS"),
            "FUNCTIONS": _show_parser("FUNCTIONS"),
            "PROCEDURES": _show_parser("PROCEDURES"),
            "WAREHOUSES": _show_parser("WAREHOUSES"),
        }
 563
        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-character tokens that may appear inside a staged file path
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default output columns of Snowflake's FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Lambda params may carry type casts; keep just the identifiers
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 595
 596        def _parse_use(self) -> exp.Use:
 597            if self._match_text_seq("SECONDARY", "ROLES"):
 598                this = self._match_texts(("ALL", "NONE")) and exp.var(self._prev.text.upper())
 599                roles = None if this else self._parse_csv(lambda: self._parse_table(schema=False))
 600                return self.expression(
 601                    exp.Use, kind="SECONDARY ROLES", this=this, expressions=roles
 602                )
 603
 604            return super()._parse_use()
 605
 606        def _negate_range(
 607            self, this: t.Optional[exp.Expression] = None
 608        ) -> t.Optional[exp.Expression]:
 609            if not this:
 610                return this
 611
 612            query = this.args.get("query")
 613            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 614                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 615                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 616                # which can produce different results (most likely a SnowFlake bug).
 617                #
 618                # https://docs.snowflake.com/en/sql-reference/functions/in
 619                # Context: https://github.com/tobymao/sqlglot/issues/3890
 620                return self.expression(
 621                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 622                )
 623
 624            return self.expression(exp.Not, this=this)
 625
 626        def _parse_tag(self) -> exp.Tags:
 627            return self.expression(
 628                exp.Tags,
 629                expressions=self._parse_wrapped_csv(self._parse_property),
 630            )
 631
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH MASKING POLICY / WITH PROJECTION POLICY / WITH TAG constraints.

            The WITH keyword is optional; if the constraint started directly with
            MASKING/PROJECTION/TAG, step back so those tokens can be re-matched below.
            """
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None
 654
 655        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
 656            if self._match(TokenType.TAG):
 657                return self._parse_tag()
 658
 659            return super()._parse_with_property()
 660
 661        def _parse_create(self) -> exp.Create | exp.Command:
 662            expression = super()._parse_create()
 663            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 664                # Replace the Table node with the enclosed Identifier
 665                expression.this.replace(expression.this.this)
 666
 667            return expression
 668
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), mapping EPOCH_* parts to unix-time math."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # Scale factor from seconds to the requested sub-second resolution
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)
 701
 702        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 703            if is_map:
 704                # Keys are strings in Snowflake's objects, see also:
 705                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 706                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 707                return self._parse_slice(self._parse_string())
 708
 709            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 710
 711        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 712            lateral = super()._parse_lateral()
 713            if not lateral:
 714                return lateral
 715
 716            if isinstance(lateral.this, exp.Explode):
 717                table_alias = lateral.args.get("alias")
 718                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 719                if table_alias and not table_alias.args.get("columns"):
 720                    table_alias.set("columns", columns)
 721                elif not table_alias:
 722                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 723
 724            return lateral
 725
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, including Snowflake staged-file references.

            A staged file is either a quoted string or an @-prefixed location path,
            optionally followed by wrapped options such as
            ``(FILE_FORMAT => ..., PATTERN => ...)``. Anything else is delegated to
            the base parser.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Consume "(opt => value, ...)" pairs until the closing paren,
                # an unknown option name, or end of input.
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT is either a string literal or a named format object
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 759
 760        def _parse_table(
 761            self,
 762            schema: bool = False,
 763            joins: bool = False,
 764            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
 765            parse_bracket: bool = False,
 766            is_db_reference: bool = False,
 767            parse_partition: bool = False,
 768        ) -> t.Optional[exp.Expression]:
 769            table = super()._parse_table(
 770                schema=schema,
 771                joins=joins,
 772                alias_tokens=alias_tokens,
 773                parse_bracket=parse_bracket,
 774                is_db_reference=is_db_reference,
 775                parse_partition=parse_partition,
 776            )
 777            if isinstance(table, exp.Table) and isinstance(table.this, exp.TableFromRows):
 778                table_from_rows = table.this
 779                for arg in exp.TableFromRows.arg_types:
 780                    if arg != "this":
 781                        table_from_rows.set(arg, table.args.get(arg))
 782
 783                table = table_from_rows
 784
 785            return table
 786
 787        def _parse_id_var(
 788            self,
 789            any_token: bool = True,
 790            tokens: t.Optional[t.Collection[TokenType]] = None,
 791        ) -> t.Optional[exp.Expression]:
 792            if self._match_text_seq("IDENTIFIER", "("):
 793                identifier = (
 794                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 795                    or self._parse_string()
 796                )
 797                self._match_r_paren()
 798                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 799
 800            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 801
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW statement into an exp.Show node.

            Args:
                this: The SHOW object kind already consumed (e.g. "TABLES").
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            # Optional "IN <scope>" clause; the scope kind determines whether a
            # table/schema name follows.
            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_text_seq("CLASS"):
                    scope_kind = "CLASS"
                    scope = self._parse_table_parts()
                elif self._match_text_seq("APPLICATION"):
                    scope_kind = "APPLICATION"
                    if self._match_text_seq("PACKAGE"):
                        scope_kind += " PACKAGE"
                    scope = self._parse_table_parts()
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare name: infer SCHEMA vs TABLE scope from the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                    "privileges": self._match_text_seq("WITH", "PRIVILEGES")
                    and self._parse_csv(lambda: self._parse_var(any_token=True, upper=True)),
                },
            )
 849
 850        def _parse_put(self) -> exp.Put | exp.Command:
 851            if self._curr.token_type != TokenType.STRING:
 852                return self._parse_as_command(self._prev)
 853
 854            return self.expression(
 855                exp.Put,
 856                this=self._parse_string(),
 857                target=self._parse_location_path(),
 858                properties=self._parse_properties(),
 859            )
 860
 861        def _parse_get(self) -> exp.Get | exp.Command:
 862            start = self._prev
 863            target = self._parse_location_path()
 864
 865            # Parse as command if unquoted file path
 866            if self._curr.token_type == TokenType.URI_START:
 867                return self._parse_as_command(start)
 868
 869            return self.expression(
 870                exp.Get,
 871                this=self._parse_string(),
 872                target=target,
 873                properties=self._parse_properties(),
 874            )
 875
 876        def _parse_location_property(self) -> exp.LocationProperty:
 877            self._match(TokenType.EQ)
 878            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 879
 880        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 881            # Parse either a subquery or a staged file
 882            return (
 883                self._parse_select(table=True, parse_subquery_alias=False)
 884                if self._match(TokenType.L_PAREN, advance=False)
 885                else self._parse_table_parts()
 886            )
 887
 888        def _parse_location_path(self) -> exp.Var:
 889            start = self._curr
 890            self._advance_any(ignore_reserved=True)
 891
 892            # We avoid consuming a comma token because external tables like @foo and @bar
 893            # can be joined in a query with a comma separator, as well as closing paren
 894            # in case of subqueries
 895            while self._is_connected() and not self._match_set(
 896                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 897            ):
 898                self._advance_any(ignore_reserved=True)
 899
 900            return exp.var(self._find_sql(start, self._prev))
 901
 902        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 903            this = super()._parse_lambda_arg()
 904
 905            if not this:
 906                return this
 907
 908            typ = self._parse_types()
 909
 910            if typ:
 911                return self.expression(exp.Cast, this=this, to=typ)
 912
 913            return this
 914
 915        def _parse_foreign_key(self) -> exp.ForeignKey:
 916            # inlineFK, the REFERENCES columns are implied
 917            if self._match(TokenType.REFERENCES, advance=False):
 918                return self.expression(exp.ForeignKey)
 919
 920            # outoflineFK, explicitly names the columns
 921            return super()._parse_foreign_key()
 922
 923        def _parse_file_format_property(self) -> exp.FileFormatProperty:
 924            self._match(TokenType.EQ)
 925            if self._match(TokenType.L_PAREN, advance=False):
 926                expressions = self._parse_wrapped_options()
 927            else:
 928                expressions = [self._parse_format_name()]
 929
 930            return self.expression(
 931                exp.FileFormatProperty,
 932                expressions=expressions,
 933            )
 934
 935        def _parse_credentials_property(self) -> exp.CredentialsProperty:
 936            return self.expression(
 937                exp.CredentialsProperty,
 938                expressions=self._parse_wrapped_options(),
 939            )
 940
    class Tokenizer(tokens.Tokenizer):
        # Both backslash and a doubled single quote escape inside string literals
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$-delimited raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "FILE://": TokenType.URI_START,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "FILE FORMAT": TokenType.FILE_FORMAT,
            "GET": TokenType.GET,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.PUT,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STAGE": TokenType.STAGE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no "/*+" hint-comment token
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        # "$" may also start an identifier/variable name
        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed structurally (see Parser._parse_show_snowflake) rather
        # than being swallowed as an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 987
 988    class Generator(generator.Generator):
        # --- Generator feature flags for Snowflake's SQL surface ---
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Per-expression SQL generators: renames, argument reorderings, and
        # preprocess transforms that lower unsupported constructs.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FileFormatProperty: lambda self,
            e: f"FILE_FORMAT=({self.expressions(e, 'expressions', sep=' ')})",
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, sep=""),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
            exp.LocationProperty: lambda self, e: f"LOCATION={self.sql(e, 'this')}",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_projection_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: strposition_sql(
                self, e, func_name="CHARINDEX", supports_position=True
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.TsOrDsToTime: lambda self, e: self.func(
                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
            ),
            exp.Unhex: rename_func("HEX_DECODE_BINARY"),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        # JSON path nodes Snowflake can render natively
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        TOKEN_MAPPING = {
            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.CredentialsProperty: exp.Properties.Location.POST_WITH,
            exp.LocationProperty: exp.Properties.Location.POST_WITH,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that cannot appear inside a VALUES clause rendered as
        # a table (see values_sql below, which falls back for these)
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
1155
        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties unwrapped and space-separated, as Snowflake expects."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1158
1159        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1160            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1161                values_as_table = False
1162
1163            return super().values_sql(expression, values_as_table=values_as_table)
1164
1165        def datatype_sql(self, expression: exp.DataType) -> str:
1166            expressions = expression.expressions
1167            if (
1168                expressions
1169                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1170                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1171            ):
1172                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1173                return "OBJECT"
1174
1175            return super().datatype_sql(expression)
1176
1177        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1178            return self.func(
1179                "TO_NUMBER",
1180                expression.this,
1181                expression.args.get("format"),
1182                expression.args.get("precision"),
1183                expression.args.get("scale"),
1184            )
1185
1186        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
1187            milli = expression.args.get("milli")
1188            if milli is not None:
1189                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
1190                expression.set("nano", milli_to_nano)
1191
1192            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1193
1194        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1195            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1196                return self.func("TO_GEOGRAPHY", expression.this)
1197            if expression.is_type(exp.DataType.Type.GEOMETRY):
1198                return self.func("TO_GEOMETRY", expression.this)
1199
1200            return super().cast_sql(expression, safe_prefix=safe_prefix)
1201
1202        def trycast_sql(self, expression: exp.TryCast) -> str:
1203            value = expression.this
1204
1205            if value.type is None:
1206                from sqlglot.optimizer.annotate_types import annotate_types
1207
1208                value = annotate_types(value, dialect=self.dialect)
1209
1210            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
1211                return super().trycast_sql(expression)
1212
1213            # TRY_CAST only works for string values in Snowflake
1214            return self.cast_sql(expression)
1215
1216        def log_sql(self, expression: exp.Log) -> str:
1217            if not expression.expression:
1218                return self.func("LN", expression.this)
1219
1220            return super().log_sql(expression)
1221
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN emits a fixed six-column row (seq, key, path, index, value,
            this); the caller's alias columns are spliced into the index/value
            positions so downstream references keep working.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # WITH OFFSET's alias takes FLATTEN's "index" position
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied alias column names the "value" position
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            # FLATTEN takes its argument as a named INPUT => parameter
            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1249
        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW statement from the args collected by
            Parser._parse_show_snowflake; each optional clause carries its own
            leading space so absent clauses vanish cleanly."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            privileges = self.expressions(expression, key="privileges", flat=True)
            privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"
1277
1278        def describe_sql(self, expression: exp.Describe) -> str:
1279            # Default to table if kind is unknown
1280            kind_value = expression.args.get("kind") or "TABLE"
1281            kind = f" {kind_value}" if kind_value else ""
1282            this = f" {self.sql(expression, 'this')}"
1283            expressions = self.expressions(expression, flat=True)
1284            expressions = f" {expressions}" if expressions else ""
1285            return f"DESCRIBE{kind}{this}{expressions}"
1286
1287        def generatedasidentitycolumnconstraint_sql(
1288            self, expression: exp.GeneratedAsIdentityColumnConstraint
1289        ) -> str:
1290            start = expression.args.get("start")
1291            start = f" START {start}" if start else ""
1292            increment = expression.args.get("increment")
1293            increment = f" INCREMENT {increment}" if increment else ""
1294            return f"AUTOINCREMENT{start}{increment}"
1295
        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render a clustering key as CLUSTER BY (<expressions>)."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1298
1299        def struct_sql(self, expression: exp.Struct) -> str:
1300            keys = []
1301            values = []
1302
1303            for i, e in enumerate(expression.expressions):
1304                if isinstance(e, exp.PropertyEQ):
1305                    keys.append(
1306                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1307                    )
1308                    values.append(e.expression)
1309                else:
1310                    keys.append(exp.Literal.string(f"_{i}"))
1311                    values.append(e)
1312
1313            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1314
1315        @unsupported_args("weight", "accuracy")
1316        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1317            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1318
1319        def alterset_sql(self, expression: exp.AlterSet) -> str:
1320            exprs = self.expressions(expression, flat=True)
1321            exprs = f" {exprs}" if exprs else ""
1322            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1323            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1324            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1325            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1326            tag = self.expressions(expression, key="tag", flat=True)
1327            tag = f" TAG {tag}" if tag else ""
1328
1329            return f"SET{exprs}{file_format}{copy_options}{tag}"
1330
1331        def strtotime_sql(self, expression: exp.StrToTime):
1332            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1333            return self.func(
1334                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1335            )
1336
1337        def timestampsub_sql(self, expression: exp.TimestampSub):
1338            return self.sql(
1339                exp.TimestampAdd(
1340                    this=expression.this,
1341                    expression=expression.expression * -1,
1342                    unit=expression.unit,
1343                )
1344            )
1345
1346        def jsonextract_sql(self, expression: exp.JSONExtract):
1347            this = expression.this
1348
1349            # JSON strings are valid coming from other dialects such as BQ
1350            return self.func(
1351                "GET_PATH",
1352                exp.ParseJSON(this=this) if this.is_string else this,
1353                expression.expression,
1354            )
1355
1356        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1357            this = expression.this
1358            if not isinstance(this, exp.TsOrDsToTimestamp):
1359                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1360
1361            return self.func("TO_CHAR", this, self.format_time(expression))
1362
1363        def datesub_sql(self, expression: exp.DateSub) -> str:
1364            value = expression.expression
1365            if value:
1366                value.replace(value * (-1))
1367            else:
1368                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")
1369
1370            return date_delta_sql("DATEADD")(self, expression)
1371
1372        def select_sql(self, expression: exp.Select) -> str:
1373            limit = expression.args.get("limit")
1374            offset = expression.args.get("offset")
1375            if offset and not limit:
1376                expression.limit(exp.Null(), copy=False)
1377            return super().select_sql(expression)
class Snowflake(sqlglot.dialects.dialect.Dialect):
 337class Snowflake(Dialect):
    # Identifier normalization: https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    # NULLs sort as the largest values by default.
    NULL_ORDERING = "nulls_are_large"
    # Default time format literal used when none is supplied.
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    # TABLESAMPLE sizes are interpreted as percentages.
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake time-format tokens mapped to strftime-style directives. Upper-
    # and lower-case spellings are mapped individually — a few differ between
    # cases (e.g. "DD" -> "%d" but "dd" -> "%-d", "DY" -> "%a" but "dy" -> "%w").
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF6": "%f",
        "ff6": "%f",
    }
 375
 376    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 377        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 378        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 379        if (
 380            isinstance(expression, exp.Identifier)
 381            and isinstance(expression.parent, exp.Table)
 382            and expression.name.lower() == "dual"
 383        ):
 384            return expression  # type: ignore
 385
 386        return super().quote_identifier(expression, identify=identify)
 387
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # "$" is removed from the single-character token set for Snowflake JSON paths.
        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("$")
 391
    class Parser(parser.Parser):
        """Parser for Snowflake's SQL dialect."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        COLON_PLACEHOLDER_TOKENS = ID_VAR_TOKENS | {TokenType.NUMBER}

        # Snowflake function names mapped to builders for sqlglot expression nodes.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Argument order is swapped: Snowflake takes (value, array).
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "HEX_DECODE_BINARY": exp.Unhex.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_CHAR": build_timetostr_or_tochar,
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
            "LISTAGG": lambda self: self._parse_string_agg(),
        }
        # TRIM is parsed as a regular function in this dialect.
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is not a timestamp-producing token here (see TO_TIME handling above).
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <csv of identifiers>
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.GET: lambda self: self._parse_get(),
            TokenType.PUT: lambda self: self._parse_put(),
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "CREDENTIALS": lambda self: self._parse_credentials_property(),
            "FILE_FORMAT": lambda self: self._parse_file_format_property(),
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
            "USING": lambda self: self._match_text_seq("TEMPLATE")
            and self.expression(exp.UsingTemplateProperty, this=self._parse_statement()),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> variants; TERSE spellings map to the same parser.
        SHOW_PARSERS = {
            "DATABASES": _show_parser("DATABASES"),
            "TERSE DATABASES": _show_parser("DATABASES"),
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "STAGES": _show_parser("STAGES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
            "FILE FORMATS": _show_parser("FILE FORMATS"),
            "FUNCTIONS": _show_parser("FUNCTIONS"),
            "PROCEDURES": _show_parser("PROCEDURES"),
            "WAREHOUSES": _show_parser("WAREHOUSES"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Implicit output columns of Snowflake's FLATTEN table function.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            # Lambda args may carry inline casts; strip them from the arg list.
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }

        def _parse_use(self) -> exp.Use:
            """Parse USE, including Snowflake's USE SECONDARY ROLES statement."""
            if self._match_text_seq("SECONDARY", "ROLES"):
                this = self._match_texts(("ALL", "NONE")) and exp.var(self._prev.text.upper())
                roles = None if this else self._parse_csv(lambda: self._parse_table(schema=False))
                return self.expression(
                    exp.Use, kind="SECONDARY ROLES", this=this, expressions=roles
                )

            return super()._parse_use()

        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a range expression, special-casing NOT IN over a subquery."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a SnowFlake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)

        def _parse_tag(self) -> exp.Tags:
            """Parse a parenthesized, comma-separated TAG property list."""
            return self.expression(
                exp.Tags,
                expressions=self._parse_wrapped_csv(self._parse_property),
            )

        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH MASKING POLICY / PROJECTION POLICY / TAG column constraints."""
            if self._prev.token_type != TokenType.WITH:
                # The WITH keyword is optional; back up so the policy keyword is re-read.
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None

        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
            """Parse WITH TAG as a property; defer everything else to the base parser."""
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return super()._parse_with_property()

        def _parse_create(self) -> exp.Create | exp.Command:
            """Parse CREATE, unwrapping the name for non-table creatables."""
            expression = super()._parse_create()
            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
                # Replace the Table node with the enclosed Identifier
                expression.this.replace(expression.this.this)

            return expression

        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART args, turning EPOCH_* parts into a (scaled) TimeToUnix."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse a key inside brackets; map keys must be string literals."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's implicit output columns to its alias."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, handling staged-file references (strings or @paths)
            with optional (FILE_FORMAT => ..., PATTERN => ...) options."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
        ) -> t.Optional[exp.Expression]:
            """Parse a table; hoist TABLE(...) so TableFromRows carries the table's args."""
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
                parse_partition=parse_partition,
            )
            if isinstance(table, exp.Table) and isinstance(table.this, exp.TableFromRows):
                table_from_rows = table.this
                for arg in exp.TableFromRows.arg_types:
                    if arg != "this":
                        table_from_rows.set(arg, table.args.get(arg))

                table = table_from_rows

            return table

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting IDENTIFIER(<name or string>) indirection."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse a SHOW statement body into an exp.Show node."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_text_seq("CLASS"):
                    scope_kind = "CLASS"
                    scope = self._parse_table_parts()
                elif self._match_text_seq("APPLICATION"):
                    scope_kind = "APPLICATION"
                    if self._match_text_seq("PACKAGE"):
                        scope_kind += " PACKAGE"
                    scope = self._parse_table_parts()
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                    "privileges": self._match_text_seq("WITH", "PRIVILEGES")
                    and self._parse_csv(lambda: self._parse_var(any_token=True, upper=True)),
                },
            )

        def _parse_put(self) -> exp.Put | exp.Command:
            """Parse PUT; fall back to a raw Command when the source is not a string."""
            if self._curr.token_type != TokenType.STRING:
                return self._parse_as_command(self._prev)

            return self.expression(
                exp.Put,
                this=self._parse_string(),
                target=self._parse_location_path(),
                properties=self._parse_properties(),
            )

        def _parse_get(self) -> exp.Get | exp.Command:
            """Parse GET; fall back to a raw Command for unquoted file paths."""
            start = self._prev
            target = self._parse_location_path()

            # Parse as command if unquoted file path
            if self._curr.token_type == TokenType.URI_START:
                return self._parse_as_command(start)

            return self.expression(
                exp.Get,
                this=self._parse_string(),
                target=target,
                properties=self._parse_properties(),
            )

        def _parse_location_property(self) -> exp.LocationProperty:
            """Parse LOCATION [=] <path>."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_file_location(self) -> t.Optional[exp.Expression]:
            # Parse either a subquery or a staged file
            return (
                self._parse_select(table=True, parse_subquery_alias=False)
                if self._match(TokenType.L_PAREN, advance=False)
                else self._parse_table_parts()
            )

        def _parse_location_path(self) -> exp.Var:
            """Consume a stage/location path verbatim and return it as a Var."""
            start = self._curr
            self._advance_any(ignore_reserved=True)

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                self._advance_any(ignore_reserved=True)

            return exp.var(self._find_sql(start, self._prev))

        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
            """Parse a lambda argument, wrapping it in a Cast when a type follows."""
            this = super()._parse_lambda_arg()

            if not this:
                return this

            typ = self._parse_types()

            if typ:
                return self.expression(exp.Cast, this=this, to=typ)

            return this

        def _parse_foreign_key(self) -> exp.ForeignKey:
            """Parse a FOREIGN KEY constraint, inline or out-of-line."""
            # inlineFK, the REFERENCES columns are implied
            if self._match(TokenType.REFERENCES, advance=False):
                return self.expression(exp.ForeignKey)

            # outoflineFK, explicitly names the columns
            return super()._parse_foreign_key()

        def _parse_file_format_property(self) -> exp.FileFormatProperty:
            """Parse FILE_FORMAT [=] ( options ) or FILE_FORMAT [=] <format name>."""
            self._match(TokenType.EQ)
            if self._match(TokenType.L_PAREN, advance=False):
                expressions = self._parse_wrapped_options()
            else:
                expressions = [self._parse_format_name()]

            return self.expression(
                exp.FileFormatProperty,
                expressions=expressions,
            )

        def _parse_credentials_property(self) -> exp.CredentialsProperty:
            """Parse CREDENTIALS = ( options )."""
            return self.expression(
                exp.CredentialsProperty,
                expressions=self._parse_wrapped_options(),
            )
    class Tokenizer(tokens.Tokenizer):
        # Characters recognized as escapes inside string literals.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ dollar-quoted strings are taken verbatim.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        # Block comments do not nest in this dialect.
        NESTED_COMMENTS = False

        # Snowflake-specific keywords and type aliases layered over the defaults.
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "FILE://": TokenType.URI_START,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "FILE FORMAT": TokenType.FILE_FORMAT,
            "GET": TokenType.GET,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.PUT,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STAGE": TokenType.STAGE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # "/*+" (the hint-comment opener) is not a keyword in this dialect.
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # "$" introduces parameters (e.g. $1).
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement here, not executed as a raw command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 988
    class Generator(generator.Generator):
        # Dialect quirks: "$"-prefixed parameters, single-quoted intervals,
        # EXCLUDE instead of EXCEPT in SELECT *, INSERT OVERWRITE INTO, etc.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Per-node SQL renderers: maps sqlglot AST nodes to their Snowflake
        # spellings (renames, argument reordering, and preprocess rewrites).
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array), i.e. reversed args.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FileFormatProperty: lambda self,
            e: f"FILE_FORMAT=({self.expressions(e, 'expressions', sep=' ')})",
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive; ARRAY_GENERATE_RANGE's end is exclusive,
            # hence the +1 (mirrors the -1 applied when parsing).
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, sep=""),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
            exp.LocationProperty: lambda self, e: f"LOCATION={self.sql(e, 'this')}",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_projection_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: strposition_sql(
                self, e, func_name="CHARINDEX", supports_position=True
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF's unit comes first and the operands are swapped.
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.TsOrDsToTime: lambda self, e: self.func(
                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
            ),
            exp.Unhex: rename_func("HEX_DECODE_BINARY"),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        # JSON path syntax supported when generating GET_PATH-style accessors.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Snowflake has no NESTED/STRUCT types; both map to the semi-structured OBJECT.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        TOKEN_MAPPING = {
            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
        }

        # Where each DDL property is emitted relative to the CREATE statement.
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.CredentialsProperty: exp.Properties.Location.POST_WITH,
            exp.LocationProperty: exp.Properties.Location.POST_WITH,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression kinds that cannot appear inside a VALUES table literal
        # (see values_sql, which falls back to a SELECT-based form).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
1156
1157        def with_properties(self, properties: exp.Properties) -> str:
1158            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1159
1160        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1161            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1162                values_as_table = False
1163
1164            return super().values_sql(expression, values_as_table=values_as_table)
1165
1166        def datatype_sql(self, expression: exp.DataType) -> str:
1167            expressions = expression.expressions
1168            if (
1169                expressions
1170                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1171                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1172            ):
1173                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1174                return "OBJECT"
1175
1176            return super().datatype_sql(expression)
1177
1178        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1179            return self.func(
1180                "TO_NUMBER",
1181                expression.this,
1182                expression.args.get("format"),
1183                expression.args.get("precision"),
1184                expression.args.get("scale"),
1185            )
1186
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding milliseconds into nanoseconds.

            Snowflake's TIMESTAMP_FROM_PARTS takes a nanoseconds argument rather
            than milliseconds, so a `milli` arg is popped and rescaled (x 1_000_000)
            into the `nano` slot before rendering.
            """
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1194
1195        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1196            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1197                return self.func("TO_GEOGRAPHY", expression.this)
1198            if expression.is_type(exp.DataType.Type.GEOMETRY):
1199                return self.func("TO_GEOMETRY", expression.this)
1200
1201            return super().cast_sql(expression, safe_prefix=safe_prefix)
1202
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, degrading to a plain CAST for non-string operands.

            Snowflake's TRY_CAST only accepts string input, so the operand's type
            is resolved first (annotating the AST on the fly if it is untyped) and
            anything that is not text/unknown falls back to a regular cast.
            """
            value = expression.this

            if value.type is None:
                # Imported lazily to avoid a circular import at module load time.
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value, dialect=self.dialect)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1216
1217        def log_sql(self, expression: exp.Log) -> str:
1218            if not expression.expression:
1219                return self.func("LN", expression.this)
1220
1221            return super().log_sql(expression)
1222
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)) construct.

            FLATTEN always exposes six columns (seq, key, path, index, value,
            this), so an explicit column alias list is built in that order: a
            popped OFFSET expression takes the "index" slot, and the first
            user-supplied alias column (if any) takes the "value" slot.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            # FLATTEN expects its argument as a named INPUT => parameter.
            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1250
1251        def show_sql(self, expression: exp.Show) -> str:
1252            terse = "TERSE " if expression.args.get("terse") else ""
1253            history = " HISTORY" if expression.args.get("history") else ""
1254            like = self.sql(expression, "like")
1255            like = f" LIKE {like}" if like else ""
1256
1257            scope = self.sql(expression, "scope")
1258            scope = f" {scope}" if scope else ""
1259
1260            scope_kind = self.sql(expression, "scope_kind")
1261            if scope_kind:
1262                scope_kind = f" IN {scope_kind}"
1263
1264            starts_with = self.sql(expression, "starts_with")
1265            if starts_with:
1266                starts_with = f" STARTS WITH {starts_with}"
1267
1268            limit = self.sql(expression, "limit")
1269
1270            from_ = self.sql(expression, "from")
1271            if from_:
1272                from_ = f" FROM {from_}"
1273
1274            privileges = self.expressions(expression, key="privileges", flat=True)
1275            privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""
1276
1277            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"
1278
1279        def describe_sql(self, expression: exp.Describe) -> str:
1280            # Default to table if kind is unknown
1281            kind_value = expression.args.get("kind") or "TABLE"
1282            kind = f" {kind_value}" if kind_value else ""
1283            this = f" {self.sql(expression, 'this')}"
1284            expressions = self.expressions(expression, flat=True)
1285            expressions = f" {expressions}" if expressions else ""
1286            return f"DESCRIBE{kind}{this}{expressions}"
1287
1288        def generatedasidentitycolumnconstraint_sql(
1289            self, expression: exp.GeneratedAsIdentityColumnConstraint
1290        ) -> str:
1291            start = expression.args.get("start")
1292            start = f" START {start}" if start else ""
1293            increment = expression.args.get("increment")
1294            increment = f" INCREMENT {increment}" if increment else ""
1295            return f"AUTOINCREMENT{start}{increment}"
1296
1297        def cluster_sql(self, expression: exp.Cluster) -> str:
1298            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1299
1300        def struct_sql(self, expression: exp.Struct) -> str:
1301            keys = []
1302            values = []
1303
1304            for i, e in enumerate(expression.expressions):
1305                if isinstance(e, exp.PropertyEQ):
1306                    keys.append(
1307                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1308                    )
1309                    values.append(e.expression)
1310                else:
1311                    keys.append(exp.Literal.string(f"_{i}"))
1312                    values.append(e)
1313
1314            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1315
        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE(value, quantile); weight/accuracy are unsupported."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1319
1320        def alterset_sql(self, expression: exp.AlterSet) -> str:
1321            exprs = self.expressions(expression, flat=True)
1322            exprs = f" {exprs}" if exprs else ""
1323            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1324            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1325            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1326            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1327            tag = self.expressions(expression, key="tag", flat=True)
1328            tag = f" TAG {tag}" if tag else ""
1329
1330            return f"SET{exprs}{file_format}{copy_options}{tag}"
1331
1332        def strtotime_sql(self, expression: exp.StrToTime):
1333            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1334            return self.func(
1335                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1336            )
1337
        def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
            # Snowflake has no TIMESTAMP subtraction node; rewrite as a
            # TIMESTAMPADD with a negated amount and render that instead.
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )
1346
        def jsonextract_sql(self, expression: exp.JSONExtract):
            """Render JSON extraction as GET_PATH, parsing string input to VARIANT first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )
1356
        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
            """Render TO_CHAR, casting the input to TIMESTAMP unless it is already
            a TsOrDsToTimestamp (which renders as a timestamp on its own)."""
            this = expression.this
            if not isinstance(this, exp.TsOrDsToTimestamp):
                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)

            return self.func("TO_CHAR", this, self.format_time(expression))
1363
        def datesub_sql(self, expression: exp.DateSub) -> str:
            """Render DATE_SUB as DATEADD with a negated interval amount."""
            value = expression.expression
            if value:
                # Negate the amount in place, then reuse the DATEADD rendering path.
                value.replace(value * (-1))
            else:
                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")

            return date_delta_sql("DATEADD")(self, expression)
1372
1373        def select_sql(self, expression: exp.Select) -> str:
1374            limit = expression.args.get("limit")
1375            offset = expression.args.get("offset")
1376            if offset and not limit:
1377                expression.limit(exp.Null(), copy=False)
1378            return super().select_sql(expression)
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by commas or by whitespace.

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
376    def quote_identifier(self, expression: E, identify: bool = True) -> E:
377        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
378        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
379        if (
380            isinstance(expression, exp.Identifier)
381            and isinstance(expression.parent, exp.Table)
382            and expression.name.lower() == "dual"
383        ):
384            return expression  # type: ignore
385
386        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two characters `\n`) to its unescaped version (e.g. an actual newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'Snowflake.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {'6': {0: True}}}, 'f': {'f': {'6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {'6': {0: True}}}, 'f': {'f': {'6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.JSONPathTokenizer(sqlglot.jsonpath.JSONPathTokenizer):
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # "$" is Snowflake's parameter marker (see Tokenizer), so it must not be
        # tokenized as the JSON path root; drop it from the inherited table.
        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("$")
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '-': <TokenType.DASH: 'DASH'>, '.': <TokenType.DOT: 'DOT'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '"': <TokenType.QUOTE: 'QUOTE'>, '*': <TokenType.STAR: 'STAR'>}
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        # Parsing quirks: quoted pivot strings are identifiers, TABLESAMPLE
        # defaults to BERNOULLI, and ":" extracts from VARIANT values.
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        COLON_PLACEHOLDER_TOKENS = ID_VAR_TOKENS | {TokenType.NUMBER}

        # Snowflake-specific function parsers; argument order is normalized to
        # sqlglot's canonical AST (e.g. ARRAY_CONTAINS swaps its operands).
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "HEX_DECODE_BINARY": exp.Unhex.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_CHAR": build_timetostr_or_tochar,
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
            "LISTAGG": lambda self: self._parse_string_agg(),
        }
        # TRIM is parsed as a regular function call in Snowflake.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        # GET/PUT/SHOW are real statements here (cf. Tokenizer.COMMANDS above).
        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.GET: lambda self: self._parse_get(),
            TokenType.PUT: lambda self: self._parse_put(),
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "CREDENTIALS": lambda self: self._parse_credentials_property(),
            "FILE_FORMAT": lambda self: self._parse_file_format_property(),
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
            "USING": lambda self: self._match_text_seq("TEMPLATE")
            and self.expression(exp.UsingTemplateProperty, this=self._parse_statement()),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> variants, each optionally prefixed with TERSE.
        SHOW_PARSERS = {
            "DATABASES": _show_parser("DATABASES"),
            "TERSE DATABASES": _show_parser("DATABASES"),
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "STAGES": _show_parser("STAGES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
            "FILE FORMATS": _show_parser("FILE FORMATS"),
            "FUNCTIONS": _show_parser("FUNCTIONS"),
            "PROCEDURES": _show_parser("PROCEDURES"),
            "WAREHOUSES": _show_parser("WAREHOUSES"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens that may appear unquoted inside staged file paths (@stage/path.ext).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # The six columns FLATTEN exposes (cf. Generator.unnest_sql).
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
585        LAMBDAS = {
586            **parser.Parser.LAMBDAS,
587            TokenType.ARROW: lambda self, expressions: self.expression(
588                exp.Lambda,
589                this=self._replace_lambda(
590                    self._parse_assignment(),
591                    expressions,
592                ),
593                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
594            ),
595        }
596
597        def _parse_use(self) -> exp.Use:
598            if self._match_text_seq("SECONDARY", "ROLES"):
599                this = self._match_texts(("ALL", "NONE")) and exp.var(self._prev.text.upper())
600                roles = None if this else self._parse_csv(lambda: self._parse_table(schema=False))
601                return self.expression(
602                    exp.Use, kind="SECONDARY ROLES", this=this, expressions=roles
603                )
604
605            return super()._parse_use()
606
        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a range predicate, special-casing `IN (subquery)`."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a Snowflake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)
626
627        def _parse_tag(self) -> exp.Tags:
628            return self.expression(
629                exp.Tags,
630                expressions=self._parse_wrapped_csv(self._parse_property),
631            )
632
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse Snowflake column constraints of the form
            `[WITH] MASKING POLICY ...`, `[WITH] PROJECTION POLICY ...` or
            `[WITH] TAG (...)`. Returns None when none of these match.
            """
            # WITH is optional: if dispatched on MASKING/PROJECTION/TAG directly,
            # step back one token so the keyword can be re-consumed below.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Policy names can be qualified; normalize a Column to a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None
655
656        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
657            if self._match(TokenType.TAG):
658                return self._parse_tag()
659
660            return super()._parse_with_property()
661
662        def _parse_create(self) -> exp.Create | exp.Command:
663            expression = super()._parse_create()
664            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
665                # Replace the Table node with the enclosed Identifier
666                expression.this.replace(expression.this.this)
667
668            return expression
669
670        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
671        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
672        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
673            this = self._parse_var() or self._parse_type()
674
675            if not this:
676                return None
677
678            self._match(TokenType.COMMA)
679            expression = self._parse_bitwise()
680            this = map_date_part(this)
681            name = this.name.upper()
682
683            if name.startswith("EPOCH"):
684                if name == "EPOCH_MILLISECOND":
685                    scale = 10**3
686                elif name == "EPOCH_MICROSECOND":
687                    scale = 10**6
688                elif name == "EPOCH_NANOSECOND":
689                    scale = 10**9
690                else:
691                    scale = None
692
693                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
694                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
695
696                if scale:
697                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
698
699                return to_unix
700
701            return self.expression(exp.Extract, this=this, expression=expression)
702
703        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
704            if is_map:
705                # Keys are strings in Snowflake's objects, see also:
706                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
707                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
708                return self._parse_slice(self._parse_string())
709
710            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
711
712        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
713            lateral = super()._parse_lateral()
714            if not lateral:
715                return lateral
716
717            if isinstance(lateral.this, exp.Explode):
718                table_alias = lateral.args.get("alias")
719                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
720                if table_alias and not table_alias.args.get("columns"):
721                    table_alias.set("columns", columns)
722                elif not table_alias:
723                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
724
725            return lateral
726
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, supporting Snowflake staged-file references.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or @-prefixed path denotes a staged file location
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options: (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT accepts either a string or a (qualified) name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
760
761        def _parse_table(
762            self,
763            schema: bool = False,
764            joins: bool = False,
765            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
766            parse_bracket: bool = False,
767            is_db_reference: bool = False,
768            parse_partition: bool = False,
769        ) -> t.Optional[exp.Expression]:
770            table = super()._parse_table(
771                schema=schema,
772                joins=joins,
773                alias_tokens=alias_tokens,
774                parse_bracket=parse_bracket,
775                is_db_reference=is_db_reference,
776                parse_partition=parse_partition,
777            )
778            if isinstance(table, exp.Table) and isinstance(table.this, exp.TableFromRows):
779                table_from_rows = table.this
780                for arg in exp.TableFromRows.arg_types:
781                    if arg != "this":
782                        table_from_rows.set(arg, table.args.get(arg))
783
784                table = table_from_rows
785
786            return table
787
788        def _parse_id_var(
789            self,
790            any_token: bool = True,
791            tokens: t.Optional[t.Collection[TokenType]] = None,
792        ) -> t.Optional[exp.Expression]:
793            if self._match_text_seq("IDENTIFIER", "("):
794                identifier = (
795                    super()._parse_id_var(any_token=any_token, tokens=tokens)
796                    or self._parse_string()
797                )
798                self._match_r_paren()
799                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
800
801            return super()._parse_id_var(any_token=any_token, tokens=tokens)
802
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW <kind> statement.

            `this` is the already-consumed object kind, e.g. "TABLES".
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_text_seq("CLASS"):
                    scope_kind = "CLASS"
                    scope = self._parse_table_parts()
                elif self._match_text_seq("APPLICATION"):
                    scope_kind = "APPLICATION"
                    if self._match_text_seq("PACKAGE"):
                        scope_kind += " PACKAGE"
                    scope = self._parse_table_parts()
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare `IN <name>`: infer whether the scope is a schema or a table
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                    "privileges": self._match_text_seq("WITH", "PRIVILEGES")
                    and self._parse_csv(lambda: self._parse_var(any_token=True, upper=True)),
                },
            )
850
851        def _parse_put(self) -> exp.Put | exp.Command:
852            if self._curr.token_type != TokenType.STRING:
853                return self._parse_as_command(self._prev)
854
855            return self.expression(
856                exp.Put,
857                this=self._parse_string(),
858                target=self._parse_location_path(),
859                properties=self._parse_properties(),
860            )
861
862        def _parse_get(self) -> exp.Get | exp.Command:
863            start = self._prev
864            target = self._parse_location_path()
865
866            # Parse as command if unquoted file path
867            if self._curr.token_type == TokenType.URI_START:
868                return self._parse_as_command(start)
869
870            return self.expression(
871                exp.Get,
872                this=self._parse_string(),
873                target=target,
874                properties=self._parse_properties(),
875            )
876
877        def _parse_location_property(self) -> exp.LocationProperty:
878            self._match(TokenType.EQ)
879            return self.expression(exp.LocationProperty, this=self._parse_location_path())
880
881        def _parse_file_location(self) -> t.Optional[exp.Expression]:
882            # Parse either a subquery or a staged file
883            return (
884                self._parse_select(table=True, parse_subquery_alias=False)
885                if self._match(TokenType.L_PAREN, advance=False)
886                else self._parse_table_parts()
887            )
888
        def _parse_location_path(self) -> exp.Var:
            """Consume a raw stage path (e.g. @stage/dir/file) verbatim as a Var."""
            start = self._curr
            self._advance_any(ignore_reserved=True)

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                self._advance_any(ignore_reserved=True)

            # Reassemble the consumed tokens from the original SQL text
            return exp.var(self._find_sql(start, self._prev))
902
903        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
904            this = super()._parse_lambda_arg()
905
906            if not this:
907                return this
908
909            typ = self._parse_types()
910
911            if typ:
912                return self.expression(exp.Cast, this=this, to=typ)
913
914            return this
915
916        def _parse_foreign_key(self) -> exp.ForeignKey:
917            # inlineFK, the REFERENCES columns are implied
918            if self._match(TokenType.REFERENCES, advance=False):
919                return self.expression(exp.ForeignKey)
920
921            # outoflineFK, explicitly names the columns
922            return super()._parse_foreign_key()
923
924        def _parse_file_format_property(self) -> exp.FileFormatProperty:
925            self._match(TokenType.EQ)
926            if self._match(TokenType.L_PAREN, advance=False):
927                expressions = self._parse_wrapped_options()
928            else:
929                expressions = [self._parse_format_name()]
930
931            return self.expression(
932                exp.FileFormatProperty,
933                expressions=expressions,
934            )
935
936        def _parse_credentials_property(self) -> exp.CredentialsProperty:
937            return self.expression(
938                exp.CredentialsProperty,
939                expressions=self._parse_wrapped_options(),
940            )

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.TRUE: 'TRUE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.JSONB: 'JSONB'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.XML: 'XML'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.ASOF: 'ASOF'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.PUT: 'PUT'>, <TokenType.UUID: 'UUID'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.KILL: 'KILL'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.INET: 'INET'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.TAG: 'TAG'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.INDEX: 'INDEX'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.BLOB: 'BLOB'>, 
<TokenType.OPERATOR: 'OPERATOR'>, <TokenType.LEFT: 'LEFT'>, <TokenType.USE: 'USE'>, <TokenType.SINK: 'SINK'>, <TokenType.ENUM: 'ENUM'>, <TokenType.ANTI: 'ANTI'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ROW: 'ROW'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.CACHE: 'CACHE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.NEXT: 'NEXT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.ALL: 'ALL'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DESC: 'DESC'>, <TokenType.MODEL: 'MODEL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.ANY: 'ANY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.TOP: 'TOP'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.INT: 'INT'>, <TokenType.DATE: 'DATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.FULL: 'FULL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.SEMI: 'SEMI'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.KEEP: 'KEEP'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.POINT: 'POINT'>, <TokenType.GET: 'GET'>, <TokenType.IPV4: 'IPV4'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TIME: 'TIME'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.CASE: 'CASE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.CURRENT_SCHEMA: 
'CURRENT_SCHEMA'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATE32: 'DATE32'>, <TokenType.MAP: 'MAP'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.END: 'END'>, <TokenType.NAME: 'NAME'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.IPV6: 'IPV6'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.APPLY: 'APPLY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.DETACH: 'DETACH'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.RING: 'RING'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.ASC: 'ASC'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UINT: 'UINT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.SET: 'SET'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.VAR: 'VAR'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.VOID: 'VOID'>, <TokenType.IS: 'IS'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.MERGE: 'MERGE'>, <TokenType.COLUMN: 'COLUMN'>, 
<TokenType.UINT128: 'UINT128'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DIV: 'DIV'>, <TokenType.BIT: 'BIT'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.COPY: 'COPY'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.LIST: 'LIST'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INT128: 'INT128'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.SOME: 'SOME'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.JSON: 'JSON'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NULL: 'NULL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.FINAL: 'FINAL'>, <TokenType.FILTER: 'FILTER'>}
TABLE_ALIAS_TOKENS = {<TokenType.TRUE: 'TRUE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.JSONB: 'JSONB'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.XML: 'XML'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.PUT: 'PUT'>, <TokenType.UUID: 'UUID'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.KILL: 'KILL'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.INET: 'INET'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.TAG: 'TAG'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.INDEX: 'INDEX'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.BLOB: 'BLOB'>, <TokenType.OPERATOR: 
'OPERATOR'>, <TokenType.USE: 'USE'>, <TokenType.SINK: 'SINK'>, <TokenType.ENUM: 'ENUM'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.ANTI: 'ANTI'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ROW: 'ROW'>, <TokenType.CACHE: 'CACHE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.NEXT: 'NEXT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.ALL: 'ALL'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DESC: 'DESC'>, <TokenType.MODEL: 'MODEL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.ANY: 'ANY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.TOP: 'TOP'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.INT: 'INT'>, <TokenType.DATE: 'DATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.KEEP: 'KEEP'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.POINT: 'POINT'>, <TokenType.GET: 'GET'>, <TokenType.IPV4: 'IPV4'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TIME: 'TIME'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.CASE: 'CASE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.UNIQUE: 
'UNIQUE'>, <TokenType.DATE32: 'DATE32'>, <TokenType.MAP: 'MAP'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.END: 'END'>, <TokenType.NAME: 'NAME'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.IPV6: 'IPV6'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.DETACH: 'DETACH'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.RING: 'RING'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.ASC: 'ASC'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.UINT: 'UINT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.SET: 'SET'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.VAR: 'VAR'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.VOID: 'VOID'>, <TokenType.IS: 'IS'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.MERGE: 'MERGE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DIV: 'DIV'>, <TokenType.BIT: 
'BIT'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.COPY: 'COPY'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.LIST: 'LIST'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INT128: 'INT128'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.SOME: 'SOME'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.JSON: 'JSON'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.NULL: 'NULL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.FINAL: 'FINAL'>, <TokenType.FILTER: 'FILTER'>}
COLON_PLACEHOLDER_TOKENS = {<TokenType.TRUE: 'TRUE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.JSONB: 'JSONB'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.XML: 'XML'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.ASOF: 'ASOF'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.PUT: 'PUT'>, <TokenType.UUID: 'UUID'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.NUMBER: 'NUMBER'>, <TokenType.KILL: 'KILL'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.INET: 'INET'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TAG: 'TAG'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.INDEX: 'INDEX'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.BLOB: 'BLOB'>, 
<TokenType.OPERATOR: 'OPERATOR'>, <TokenType.LEFT: 'LEFT'>, <TokenType.USE: 'USE'>, <TokenType.SINK: 'SINK'>, <TokenType.ENUM: 'ENUM'>, <TokenType.ANTI: 'ANTI'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ROW: 'ROW'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.CACHE: 'CACHE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.NEXT: 'NEXT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.ALL: 'ALL'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DESC: 'DESC'>, <TokenType.MODEL: 'MODEL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.ANY: 'ANY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.TOP: 'TOP'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.INT: 'INT'>, <TokenType.DATE: 'DATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.FULL: 'FULL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.SEMI: 'SEMI'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.KEEP: 'KEEP'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.POINT: 'POINT'>, <TokenType.GET: 'GET'>, <TokenType.IPV4: 'IPV4'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TIME: 'TIME'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.CASE: 'CASE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.CURRENT_SCHEMA: 
'CURRENT_SCHEMA'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATE32: 'DATE32'>, <TokenType.MAP: 'MAP'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.END: 'END'>, <TokenType.NAME: 'NAME'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.IPV6: 'IPV6'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.APPLY: 'APPLY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.DETACH: 'DETACH'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.RING: 'RING'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.ASC: 'ASC'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UINT: 'UINT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.SET: 'SET'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.VAR: 'VAR'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.VOID: 'VOID'>, <TokenType.IS: 'IS'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.MERGE: 'MERGE'>, <TokenType.COLUMN: 'COLUMN'>, 
<TokenType.UINT128: 'UINT128'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DIV: 'DIV'>, <TokenType.BIT: 'BIT'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.COPY: 'COPY'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.LIST: 'LIST'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INT128: 'INT128'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.SOME: 'SOME'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.JSON: 'JSON'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NULL: 'NULL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.FINAL: 'FINAL'>, <TokenType.FILTER: 'FILTER'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.And'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Contains'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_SCHEMA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentSchema'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATE_BIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateBin'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FEATURES_AT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FeaturesAtTime'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Int64'>>, 'IS_ASCII': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsAscii'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_B_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBObjectAgg'>>, 'J_S_O_N_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONCast'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractArray'>>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'CHAR_LENGTH': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'CHARACTER_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAKE_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MakeInterval'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Or'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <function build_timetostr_or_tochar>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDatetime'>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNICODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unicode'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'XMLELEMENT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLElement'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'STRPOS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'CHARINDEX': <function Parser.<lambda>>, 'INSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'LOCATE': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function _build_bitwise.<locals>._builder>, 'BIT_XOR': <function _build_bitwise.<locals>._builder>, 'BITOR': <function _build_bitwise.<locals>._builder>, 'BIT_OR': <function _build_bitwise.<locals>._builder>, 'BITSHIFTLEFT': <function _build_bitwise.<locals>._builder>, 'BIT_SHIFTLEFT': <function _build_bitwise.<locals>._builder>, 'BITSHIFTRIGHT': <function _build_bitwise.<locals>._builder>, 'BIT_SHIFTRIGHT': <function _build_bitwise.<locals>._builder>, 'BOOLXOR': <function 
_build_bitwise.<locals>._builder>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'EDITDISTANCE': <function Snowflake.Parser.<lambda>>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'HEX_DECODE_BINARY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_SUBSTR_ALL': <function _build_regexp_extract.<locals>._builder>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TABLE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIME': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'ARG_MAX': <function Parser.<dictcomp>.<lambda>>, 'ARGMAX': <function Parser.<dictcomp>.<lambda>>, 'MAX_BY': <function Parser.<dictcomp>.<lambda>>, 'ARG_MIN': <function Parser.<dictcomp>.<lambda>>, 'ARGMIN': <function Parser.<dictcomp>.<lambda>>, 'MIN_BY': <function Parser.<dictcomp>.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CEIL': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'FLOOR': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'XMLELEMENT': <function Parser.<lambda>>, 'XMLTABLE': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>, 'LISTAGG': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.ANALYZE: 'ANALYZE'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UNPIVOT: 'UNPIVOT'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.GET: 'GET'>: <function Snowflake.Parser.<lambda>>, <TokenType.PUT: 'PUT'>: <function Snowflake.Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Snowflake.Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'CREDENTIALS': <function Snowflake.Parser.<lambda>>, 'FILE_FORMAT': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'DATABASES': <function _show_parser.<locals>._parse>, 'TERSE DATABASES': <function _show_parser.<locals>._parse>, 'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'STAGES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>, 'FILE FORMATS': <function _show_parser.<locals>._parse>, 'FUNCTIONS': <function _show_parser.<locals>._parse>, 'PROCEDURES': <function _show_parser.<locals>._parse>, 'WAREHOUSES': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WATERMARK': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'BUCKET': <function Parser.<lambda>>, 'TRUNCATE': <function Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'VIEWS', 'OBJECTS', 'IMPORTED KEYS', 'UNIQUE KEYS', 'TABLES', 'SEQUENCES'}
NON_TABLE_CREATABLES = {'STREAMLIT', 'TAG', 'WAREHOUSE', 'STORAGE INTEGRATION'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'DATABASES': {0: True}, 'TERSE': {'DATABASES': {0: True}, 'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'STAGES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}, 'FILE': {'FORMATS': {0: True}}, 'FUNCTIONS': {0: True}, 'PROCEDURES': {0: True}, 'WAREHOUSES': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
ANALYZE_STYLES
ANALYZE_EXPRESSION_PARSERS
PARTITION_KEYWORDS
AMBIGUOUS_ALIAS_TOKENS
OPERATION_MODIFIERS
RECURSIVE_CTE_SEARCH_KIND
MODIFIABLES
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
OPTIONAL_ALIAS_TOKEN_CTE
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
parse_set_operation
errors
sql
class Tokenizer(tokens.Tokenizer):
    """Tokenizer tuned for Snowflake's lexical rules."""

    # Snowflake strings may escape with a backslash or a doubled single quote.
    STRING_ESCAPES = ["\\", "'"]
    # Hex literals: x'...' / X'...'
    HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    # Dollar-quoted ($$ ... $$) strings are taken verbatim.
    RAW_STRINGS = ["$$"]
    # Snowflake additionally accepts `//` single-line comments.
    COMMENTS = ["--", "//", ("/*", "*/")]
    # Block comments do not nest.
    NESTED_COMMENTS = False

    # Start from the base keyword table, then layer Snowflake-specific
    # keywords and type aliases on top of it.
    KEYWORDS = dict(tokens.Tokenizer.KEYWORDS)
    KEYWORDS.update(
        {
            "FILE://": TokenType.URI_START,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "FILE FORMAT": TokenType.FILE_FORMAT,
            "GET": TokenType.GET,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.PUT,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STAGE": TokenType.STAGE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
    )
    # The `/*+` hint-comment opener is not recognized by Snowflake.
    del KEYWORDS["/*+"]

    # `$` introduces parameters (e.g. $1) in addition to the base single tokens.
    SINGLE_TOKENS = dict(tokens.Tokenizer.SINGLE_TOKENS)
    SINGLE_TOKENS["$"] = TokenType.PARAMETER

    # `$` may also appear inside identifiers/variables.
    VAR_SINGLE_TOKENS = {"$"}

    # SHOW is parsed as a real statement here, not treated as an opaque command.
    COMMANDS = tokens.Tokenizer.COMMANDS.difference({TokenType.SHOW})
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': 
<TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NAMESPACE': <TokenType.NAMESPACE: 'NAMESPACE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': 
<TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 
'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT256': <TokenType.INT256: 'INT256'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'UINT128': <TokenType.UINT128: 'UINT128'>, 'UINT256': <TokenType.UINT256: 'UINT256'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': 
<TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 
'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.ANALYZE: 'ANALYZE'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'FILE://': <TokenType.URI_START: 'URI_START'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'FILE FORMAT': <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, 'GET': <TokenType.GET: 'GET'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.PUT: 'PUT'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STAGE': <TokenType.STAGE: 'STAGE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.FETCH: 'FETCH'>, <TokenType.RENAME: 'RENAME'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>}
class Snowflake.Generator(sqlglot.generator.Generator):
    class Generator(generator.Generator):
        """SQL generator with Snowflake-specific syntax settings and overrides."""

        # Bind/session parameters are $-prefixed.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        # Intervals are rendered with a single quoted string.
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        # No FILTER clause on aggregate functions.
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        # COLLATE is invoked as a function rather than a binary operator.
        COLLATE_IS_FUNC = True
        # LIMIT only accepts literal values.
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        # INSERT OVERWRITE is spelled "INSERT OVERWRITE INTO".
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # "SELECT * EXCEPT" is spelled "SELECT * EXCLUDE".
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"
        # Per-expression SQL rendering overrides, layered on top of the base
        # generator's transforms. Mostly function renames plus a handful of
        # argument reorderings and preprocessing passes.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Note the swapped argument order: ARRAY_CONTAINS(value, array).
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FileFormatProperty: lambda self,
            e: f"FILE_FORMAT=({self.expressions(e, 'expressions', sep=' ')})",
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # end + 1: ARRAY_GENERATE_RANGE's upper bound appears to be
            # exclusive, unlike GenerateSeries — TODO confirm against docs.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, sep=""),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
            exp.LocationProperty: lambda self, e: f"LOCATION={self.sql(e, 'this')}",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_projection_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: strposition_sql(
                self, e, func_name="CHARINDEX", supports_position=True
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # Note the swapped operand order relative to the AST (expression, this).
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.TsOrDsToTime: lambda self, e: self.func(
                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
            ),
            exp.Unhex: rename_func("HEX_DECODE_BINARY"),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
1124
        # JSON path components that can be rendered natively; anything else
        # falls back to the generator's unsupported-path handling.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Both NESTED and STRUCT render as the semi-structured OBJECT type.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        TOKEN_MAPPING = {
            # Snowflake spells the identity keyword AUTOINCREMENT.
            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.CredentialsProperty: exp.Properties.Location.POST_WITH,
            exp.LocationProperty: exp.Properties.Location.POST_WITH,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            # SET/VOLATILE table properties have no rendering here.
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # VALUES rows containing these constructors cannot be emitted as a
        # values-as-table clause (see values_sql).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
1156
1157        def with_properties(self, properties: exp.Properties) -> str:
1158            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1159
1160        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1161            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1162                values_as_table = False
1163
1164            return super().values_sql(expression, values_as_table=values_as_table)
1165
1166        def datatype_sql(self, expression: exp.DataType) -> str:
1167            expressions = expression.expressions
1168            if (
1169                expressions
1170                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1171                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1172            ):
1173                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1174                return "OBJECT"
1175
1176            return super().datatype_sql(expression)
1177
1178        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1179            return self.func(
1180                "TO_NUMBER",
1181                expression.this,
1182                expression.args.get("format"),
1183                expression.args.get("precision"),
1184                expression.args.get("scale"),
1185            )
1186
1187        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
1188            milli = expression.args.get("milli")
1189            if milli is not None:
1190                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
1191                expression.set("nano", milli_to_nano)
1192
1193            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1194
1195        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1196            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1197                return self.func("TO_GEOGRAPHY", expression.this)
1198            if expression.is_type(exp.DataType.Type.GEOMETRY):
1199                return self.func("TO_GEOMETRY", expression.this)
1200
1201            return super().cast_sql(expression, safe_prefix=safe_prefix)
1202
1203        def trycast_sql(self, expression: exp.TryCast) -> str:
1204            value = expression.this
1205
1206            if value.type is None:
1207                from sqlglot.optimizer.annotate_types import annotate_types
1208
1209                value = annotate_types(value, dialect=self.dialect)
1210
1211            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
1212                return super().trycast_sql(expression)
1213
1214            # TRY_CAST only works for string values in Snowflake
1215            return self.cast_sql(expression)
1216
1217        def log_sql(self, expression: exp.Log) -> str:
1218            if not expression.expression:
1219                return self.func("LN", expression.this)
1220
1221            return super().log_sql(expression)
1222
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with a six-column alias.

            The alias always names six positional columns (seq, key, path, index,
            value, this); an explicit offset expression takes over the index slot,
            and the caller's first alias column (if any) takes over the value slot.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # A WITH OFFSET expression replaces the default "index" name.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The user's first alias column names the unnested value.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                # No alias supplied: synthesize one so the six columns are named.
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            # FLATTEN takes its argument as the named parameter INPUT.
            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1250
1251        def show_sql(self, expression: exp.Show) -> str:
1252            terse = "TERSE " if expression.args.get("terse") else ""
1253            history = " HISTORY" if expression.args.get("history") else ""
1254            like = self.sql(expression, "like")
1255            like = f" LIKE {like}" if like else ""
1256
1257            scope = self.sql(expression, "scope")
1258            scope = f" {scope}" if scope else ""
1259
1260            scope_kind = self.sql(expression, "scope_kind")
1261            if scope_kind:
1262                scope_kind = f" IN {scope_kind}"
1263
1264            starts_with = self.sql(expression, "starts_with")
1265            if starts_with:
1266                starts_with = f" STARTS WITH {starts_with}"
1267
1268            limit = self.sql(expression, "limit")
1269
1270            from_ = self.sql(expression, "from")
1271            if from_:
1272                from_ = f" FROM {from_}"
1273
1274            privileges = self.expressions(expression, key="privileges", flat=True)
1275            privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""
1276
1277            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"
1278
1279        def describe_sql(self, expression: exp.Describe) -> str:
1280            # Default to table if kind is unknown
1281            kind_value = expression.args.get("kind") or "TABLE"
1282            kind = f" {kind_value}" if kind_value else ""
1283            this = f" {self.sql(expression, 'this')}"
1284            expressions = self.expressions(expression, flat=True)
1285            expressions = f" {expressions}" if expressions else ""
1286            return f"DESCRIBE{kind}{this}{expressions}"
1287
1288        def generatedasidentitycolumnconstraint_sql(
1289            self, expression: exp.GeneratedAsIdentityColumnConstraint
1290        ) -> str:
1291            start = expression.args.get("start")
1292            start = f" START {start}" if start else ""
1293            increment = expression.args.get("increment")
1294            increment = f" INCREMENT {increment}" if increment else ""
1295            return f"AUTOINCREMENT{start}{increment}"
1296
1297        def cluster_sql(self, expression: exp.Cluster) -> str:
1298            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1299
1300        def struct_sql(self, expression: exp.Struct) -> str:
1301            keys = []
1302            values = []
1303
1304            for i, e in enumerate(expression.expressions):
1305                if isinstance(e, exp.PropertyEQ):
1306                    keys.append(
1307                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1308                    )
1309                    values.append(e.expression)
1310                else:
1311                    keys.append(exp.Literal.string(f"_{i}"))
1312                    values.append(e)
1313
1314            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1315
1316        @unsupported_args("weight", "accuracy")
1317        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1318            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1319
1320        def alterset_sql(self, expression: exp.AlterSet) -> str:
1321            exprs = self.expressions(expression, flat=True)
1322            exprs = f" {exprs}" if exprs else ""
1323            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1324            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1325            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1326            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1327            tag = self.expressions(expression, key="tag", flat=True)
1328            tag = f" TAG {tag}" if tag else ""
1329
1330            return f"SET{exprs}{file_format}{copy_options}{tag}"
1331
1332        def strtotime_sql(self, expression: exp.StrToTime):
1333            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1334            return self.func(
1335                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1336            )
1337
1338        def timestampsub_sql(self, expression: exp.TimestampSub):
1339            return self.sql(
1340                exp.TimestampAdd(
1341                    this=expression.this,
1342                    expression=expression.expression * -1,
1343                    unit=expression.unit,
1344                )
1345            )
1346
1347        def jsonextract_sql(self, expression: exp.JSONExtract):
1348            this = expression.this
1349
1350            # JSON strings are valid coming from other dialects such as BQ
1351            return self.func(
1352                "GET_PATH",
1353                exp.ParseJSON(this=this) if this.is_string else this,
1354                expression.expression,
1355            )
1356
1357        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1358            this = expression.this
1359            if not isinstance(this, exp.TsOrDsToTimestamp):
1360                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1361
1362            return self.func("TO_CHAR", this, self.format_time(expression))
1363
1364        def datesub_sql(self, expression: exp.DateSub) -> str:
1365            value = expression.expression
1366            if value:
1367                value.replace(value * (-1))
1368            else:
1369                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")
1370
1371            return date_delta_sql("DATEADD")(self, expression)
1372
1373        def select_sql(self, expression: exp.Select) -> str:
1374            limit = expression.args.get("limit")
1375            offset = expression.args.get("offset")
1376            if offset and not limit:
1377                expression.limit(exp.Null(), copy=False)
1378            return super().select_sql(expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
ARRAY_SIZE_NAME = 'ARRAY_SIZE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeColumns'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeWith'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Ceil'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CredentialsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: 
<function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Floor'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Get'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByBucket'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionByTruncate'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Put'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.Tags'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingData'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ForceProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, 
<class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseLeftShift'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseRightShift'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeDiff'>: <function timestampdiff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FileFormatProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function 
_json_extract_value_array_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.Levenshtein'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.MakeInterval'>: <function no_make_interval_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpExtract'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpExtractAll'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: 
<function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.Timestamp'>: <function no_timestamp_sql>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.DATETIME2: 'DATETIME2'>: 'TIMESTAMP', <Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.BLOB: 'BLOB'>: 'VARBINARY', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.SMALLDATETIME: 'SMALLDATETIME'>: 'TIMESTAMP', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
TOKEN_MAPPING = {<TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>: 'AUTOINCREMENT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EncodeProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.IncludeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StorageHandlerProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Tags'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ForceProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.CredentialsProperty'>: <Location.POST_WITH: 'POST_WITH'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.Map'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.StarMap'>}
def with_properties(self, properties: exp.Properties) -> str:
    """Render CREATE properties unwrapped and space-separated, preceded by a separator."""
    leading = self.sep("")
    return self.properties(properties, wrapped=False, prefix=leading, sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Render VALUES, falling back to the non-table form when the rows contain
    expressions Snowflake cannot place inside a VALUES clause (maps/structs)."""
    has_unsupported = bool(expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS))
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type; typed struct fields collapse to a bare OBJECT.

    The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ],
    so a struct whose members are raw DataType nodes cannot be emitted verbatim.
    """
    fields = expression.expressions
    is_typed_struct = (
        bool(fields)
        and expression.is_type(*exp.DataType.STRUCT_TYPES)
        and any(isinstance(member, exp.DataType) for member in fields)
    )
    if is_typed_struct:
        return "OBJECT"
    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(value [, format [, precision [, scale]]])."""
    optional_args = (expression.args.get(key) for key in ("format", "precision", "scale"))
    return self.func("TO_NUMBER", expression.this, *optional_args)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS; Snowflake has no milliseconds argument,
    so a `milli` value is folded into `nano` (1 ms = 1,000,000 ns)."""
    milli = expression.args.get("milli")
    if milli is not None:
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))
    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render CAST; geospatial targets use Snowflake's conversion functions instead."""
    if expression.is_type(exp.DataType.Type.GEOGRAPHY):
        return self.func("TO_GEOGRAPHY", expression.this)

    if expression.is_type(exp.DataType.Type.GEOMETRY):
        return self.func("TO_GEOMETRY", expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, degrading to a plain CAST for non-string operands.

    TRY_CAST only works for string values in Snowflake, so the operand's type
    is annotated on demand to decide which form is legal.
    """
    operand = expression.this

    if operand.type is None:
        from sqlglot.optimizer.annotate_types import annotate_types

        operand = annotate_types(operand, dialect=self.dialect)

    is_stringish = operand.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN)
    if not is_stringish:
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; the single-argument form is the natural logarithm (LN)."""
    if expression.expression:
        return super().log_sql(expression)
    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) form.

    FLATTEN yields a fixed six-column row (seq, key, path, index, value, this);
    any alias columns and an explicit OFFSET are mapped onto that layout.
    """
    alias_node = expression.args.get("alias")
    offset = expression.args.get("offset")

    # An explicit OFFSET expression takes the place of FLATTEN's "index" column.
    if isinstance(offset, exp.Expression):
        index_column = offset.pop()
    else:
        index_column = exp.to_identifier("index")

    # The first alias column (if any) names the "value" column.
    value_column = seq_get(alias_node.columns if alias_node else [], 0) or exp.to_identifier(
        "value"
    )

    flatten_columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_column,
        value_column,
        exp.to_identifier("this"),
    ]

    if alias_node:
        alias_node.set("columns", flatten_columns)
    else:
        alias_node = exp.TableAlias(this="_u", columns=flatten_columns)

    argument = self.sql(expression.expressions[0])
    if not argument.startswith("INPUT =>"):
        argument = f"INPUT => {argument}"

    alias_sql = self.sql(alias_node)
    suffix = f" AS {alias_sql}" if alias_sql else ""
    return f"TABLE(FLATTEN({argument})){suffix}"
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command with all of its optional modifiers."""
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like = self.sql(expression, "like")
    if like:
        like = f" LIKE {like}"

    scope = self.sql(expression, "scope")
    if scope:
        scope = f" {scope}"

    scope_kind = self.sql(expression, "scope_kind")
    scope_kind = f" IN {scope_kind}" if scope_kind else ""

    starts_with = self.sql(expression, "starts_with")
    starts_with = f" STARTS WITH {starts_with}" if starts_with else ""

    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    from_ = f" FROM {from_}" if from_ else ""

    privileges = self.expressions(expression, key="privileges", flat=True)
    privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""

    clauses = (
        terse,
        expression.name,
        history,
        like,
        scope_kind,
        scope,
        starts_with,
        limit,
        from_,
        privileges,
    )
    return "SHOW " + "".join(clauses)
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE; an unknown object kind defaults to TABLE."""
    # Default to table if kind is unknown
    kind = expression.args.get("kind") or "TABLE"
    target = self.sql(expression, "this")
    extra = self.expressions(expression, flat=True)
    extra = f" {extra}" if extra else ""
    return f"DESCRIBE {kind} {target}{extra}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column as AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render Snowflake's CLUSTER BY (<exprs>) with mandatory parentheses."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct as OBJECT_CONSTRUCT(k1, v1, k2, v2, ...).

    Named fields keep their key (identifiers become string literals);
    positional fields get synthetic "_<index>" keys.
    """
    interleaved = []
    for i, member in enumerate(expression.expressions):
        if isinstance(member, exp.PropertyEQ):
            key = (
                exp.Literal.string(member.name)
                if isinstance(member.this, exp.Identifier)
                else member.this
            )
            interleaved.extend((key, member.expression))
        else:
            interleaved.extend((exp.Literal.string(f"_{i}"), member))

    return self.func("OBJECT_CONSTRUCT", *interleaved)
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Render APPROX_QUANTILE as Snowflake's APPROX_PERCENTILE; the weight and
    accuracy arguments have no Snowflake equivalent and are flagged as unsupported."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET with optional stage file format, copy options and tags."""
    pieces = ["SET"]

    exprs = self.expressions(expression, flat=True)
    if exprs:
        pieces.append(f" {exprs}")

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        pieces.append(f" STAGE_FILE_FORMAT = ({file_format})")

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    if copy_options:
        pieces.append(f" STAGE_COPY_OPTIONS = ({copy_options})")

    tag = self.expressions(expression, key="tag", flat=True)
    if tag:
        pieces.append(f" TAG {tag}")

    return "".join(pieces)
def strtotime_sql(self, expression: exp.StrToTime):
    """Render STR_TO_TIME as TO_TIMESTAMP, or TRY_TO_TIMESTAMP when marked safe."""
    func_name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(func_name, expression.this, self.format_time(expression))
def timestampsub_sql(self, expression: exp.TimestampSub):
    """Rewrite TIMESTAMP_SUB as TIMESTAMP_ADD of the negated amount."""
    negated = expression.expression * -1
    equivalent_add = exp.TimestampAdd(
        this=expression.this,
        expression=negated,
        unit=expression.unit,
    )
    return self.sql(equivalent_add)
def jsonextract_sql(self, expression: exp.JSONExtract):
    """Render JSON extraction via GET_PATH.

    JSON strings are valid coming from other dialects such as BQ, so a string
    operand is wrapped in PARSE_JSON first.
    """
    subject = expression.this
    if subject.is_string:
        subject = exp.ParseJSON(this=subject)

    return self.func("GET_PATH", subject, expression.expression)
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    """Render TIME_TO_STR as TO_CHAR, casting the operand to TIMESTAMP unless it
    is already a timestamp conversion."""
    operand = expression.this
    if not isinstance(operand, exp.TsOrDsToTimestamp):
        operand = exp.cast(operand, exp.DataType.Type.TIMESTAMP)

    return self.func("TO_CHAR", operand, self.format_time(expression))
def datesub_sql(self, expression: exp.DateSub) -> str:
    """Rewrite DATE_SUB as DATEADD with a negated amount; an unknown amount
    cannot be negated and is reported as unsupported."""
    amount = expression.expression
    if not amount:
        self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")
    else:
        amount.replace(amount * (-1))

    return date_delta_sql("DATEADD")(self, expression)
def select_sql(self, expression: exp.Select) -> str:
    """Render SELECT; Snowflake requires a LIMIT alongside OFFSET, so a
    LIMIT NULL is injected when only an OFFSET is present."""
    has_limit = expression.args.get("limit")
    has_offset = expression.args.get("offset")
    if has_offset and not has_limit:
        expression.limit(exp.Null(), copy=False)
    return super().select_sql(expression)
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
SUPPORTS_UNIX_SECONDS
PARSE_JSON_NAME
ALTER_SET_TYPE
ARRAY_SIZE_DIM_REQUIRED
TIME_PART_SINGULARS
NAMED_PLACEHOLDER_TOKEN
EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
limitoptions_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablefromrows_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterindex_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
safedivide_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
jsoncast_sql
try_sql
use_sql
binary
ceil_floor
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
whens_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
uniquekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodatetime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonextractquote_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql
attach_sql
detach_sql
attachoption_sql
featuresattime_sql
watermarkcolumnconstraint_sql
encodeproperty_sql
includeproperty_sql
xmlelement_sql
partitionbyrangeproperty_sql
partitionbyrangepropertydynamic_sql
unpivotcolumns_sql
analyzesample_sql
analyzestatistics_sql
analyzehistogram_sql
analyzedelete_sql
analyzelistchainedrows_sql
analyzevalidate_sql
analyze_sql
xmltable_sql
xmlnamespace_sql
export_sql
declare_sql
declareitem_sql
recursivewithsearch_sql
parameterizedagg_sql
anonymousaggfunc_sql
combinedaggfunc_sql
combinedparameterizedagg_sql
get_put_sql