Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, jsonpath, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    build_timetostr_or_tochar,
  10    binary_from_function,
  11    build_default_decimal_type,
  12    build_timestamp_from_parts,
  13    date_delta_sql,
  14    date_trunc_to_time,
  15    datestrtodate_sql,
  16    build_formatted_time,
  17    if_sql,
  18    inline_array_sql,
  19    max_or_greatest,
  20    min_or_least,
  21    rename_func,
  22    timestamptrunc_sql,
  23    timestrtotime_sql,
  24    var_map_sql,
  25    map_date_part,
  26    no_timestamp_sql,
  27    strposition_sql,
  28    timestampdiff_sql,
  29    no_make_interval_sql,
  30    groupconcat_sql,
  31)
  32from sqlglot.generator import unsupported_args
  33from sqlglot.helper import flatten, is_float, is_int, seq_get
  34from sqlglot.tokens import TokenType
  35
  36if t.TYPE_CHECKING:
  37    from sqlglot._typing import E, B
  38
  39
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Build a parser for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP family.

    Args:
        name: the Snowflake function name, used for the ``exp.Anonymous``
            fallback when the arguments don't match a transpilable shape.
        kind: the target data type the function converts to.
        safe: True for the TRY_* variants, which are modeled with try-cast /
            ``safe`` flags instead of plain conversions.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # Second argument is a scale for integer inputs and a format string
        # for string inputs.
        scale_or_fmt = seq_get(args, 1)

        int_value = value is not None and is_int(value.name)
        int_scale_or_fmt = scale_or_fmt is not None and scale_or_fmt.is_int

        if isinstance(value, exp.Literal) or (value and scale_or_fmt):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if not safe and (int_value or int_scale_or_fmt):
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=scale_or_fmt)
                if not int_scale_or_fmt and not is_float(value.name):
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind in (exp.DataType.Type.DATE, exp.DataType.Type.TIME) and not int_value:
            klass = exp.TsOrDsToDate if kind == exp.DataType.Type.DATE else exp.TsOrDsToTime
            formatted_exp = build_formatted_time(klass, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Anything we can't confidently transpile is kept as an opaque call.
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  81
  82
  83def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  84    expression = parser.build_var_map(args)
  85
  86    if isinstance(expression, exp.StarMap):
  87        return expression
  88
  89    return exp.Struct(
  90        expressions=[
  91            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  92        ]
  93    )
  94
  95
  96def _build_datediff(args: t.List) -> exp.DateDiff:
  97    return exp.DateDiff(
  98        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  99    )
 100
 101
 102def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 103    def _builder(args: t.List) -> E:
 104        return expr_type(
 105            this=seq_get(args, 2),
 106            expression=seq_get(args, 1),
 107            unit=map_date_part(seq_get(args, 0)),
 108        )
 109
 110    return _builder
 111
 112
 113def _build_bitwise(expr_type: t.Type[B], name: str) -> t.Callable[[t.List], B | exp.Anonymous]:
 114    def _builder(args: t.List) -> B | exp.Anonymous:
 115        if len(args) == 3:
 116            return exp.Anonymous(this=name, expressions=args)
 117
 118        return binary_from_function(expr_type)(args)
 119
 120    return _builder
 121
 122
 123# https://docs.snowflake.com/en/sql-reference/functions/div0
 124def _build_if_from_div0(args: t.List) -> exp.If:
 125    lhs = exp._wrap(seq_get(args, 0), exp.Binary)
 126    rhs = exp._wrap(seq_get(args, 1), exp.Binary)
 127
 128    cond = exp.EQ(this=rhs, expression=exp.Literal.number(0)).and_(
 129        exp.Is(this=lhs, expression=exp.null()).not_()
 130    )
 131    true = exp.Literal.number(0)
 132    false = exp.Div(this=lhs, expression=rhs)
 133    return exp.If(this=cond, true=true, false=false)
 134
 135
 136# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 137def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 138    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 139    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 140
 141
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    """Rewrite NULLIFZERO(x) as IF(x = 0, NULL, x)."""
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 146
 147
 148def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 149    flag = expression.text("flag")
 150
 151    if "i" not in flag:
 152        flag += "i"
 153
 154    return self.func(
 155        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 156    )
 157
 158
 159def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 160    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 161
 162    if not regexp_replace.args.get("replacement"):
 163        regexp_replace.set("replacement", exp.Literal.string(""))
 164
 165    return regexp_replace
 166
 167
 168def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 169    def _parse(self: Snowflake.Parser) -> exp.Show:
 170        return self._parse_show_snowflake(*args, **kwargs)
 171
 172    return _parse
 173
 174
 175def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 176    trunc = date_trunc_to_time(args)
 177    trunc.set("unit", map_date_part(trunc.args["unit"]))
 178    return trunc
 179
 180
def _unqualify_pivot_columns(expression: exp.Expression) -> exp.Expression:
    """
    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
    so we need to unqualify them. Same goes for ANY ORDER BY <column>.

    Example:
        >>> from sqlglot import parse_one
        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
        >>> print(_unqualify_pivot_columns(expr).sql(dialect="snowflake"))
        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
    """
    if isinstance(expression, exp.Pivot):
        if expression.unpivot:
            expression = transforms.unqualify_columns(expression)
        else:
            # For PIVOT, only the columns inside a PivotAny field (the
            # `ANY [ORDER BY ...]` form) are unqualified.
            for field in expression.fields:
                field_expr = seq_get(field.expressions if field else [], 0)

                if isinstance(field_expr, exp.PivotAny):
                    unqualified_field_expr = transforms.unqualify_columns(field_expr)
                    # Splice the unqualified node back into slot 0 of the field.
                    t.cast(exp.Expression, field).set("expressions", unqualified_field_expr, 0)

    return expression
 204
 205
def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
    """Drop type parameters from nested column types in a CREATE statement.

    E.g. ``ARRAY<INT>`` becomes plain ``ARRAY``. Tables with an Iceberg
    property are left untouched — presumably because Iceberg tables accept
    parameterized structured types; confirm against Snowflake's docs.
    """
    assert isinstance(expression, exp.Create)

    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
        # Remove the inner type arguments, e.g. ARRAY<INT> -> ARRAY.
        if expression.this in exp.DataType.NESTED_TYPES:
            expression.set("expressions", None)
        return expression

    props = expression.args.get("properties")
    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
        for schema_expression in expression.this.expressions:
            if isinstance(schema_expression, exp.ColumnDef):
                column_type = schema_expression.kind
                if isinstance(column_type, exp.DataType):
                    # In-place transform: the column's type node is rewritten directly.
                    column_type.transform(_flatten_structured_type, copy=False)

    return expression
 223
 224
def _unnest_generate_date_array(unnest: exp.Unnest) -> None:
    """Rewrite UNNEST(GENERATE_DATE_ARRAY(start, end, step)) in place.

    The UNNEST is replaced with a subquery that projects
    ``DATEADD(unit, <sequence value>, start)`` over an ARRAY_GENERATE_RANGE of
    the required length. Only applied when both bounds are present and the
    step is exactly a 1-unit interval; otherwise the node is left untouched.
    """
    generate_date_array = unnest.expressions[0]
    start = generate_date_array.args.get("start")
    end = generate_date_array.args.get("end")
    step = generate_date_array.args.get("step")

    if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
        return

    unit = step.args.get("unit")

    unnest_alias = unnest.args.get("alias")
    if unnest_alias:
        # Copy the alias: the original node is still referenced by the UNNEST
        # copy that ends up inside the new subquery below.
        unnest_alias = unnest_alias.copy()
        sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
    else:
        sequence_value_name = "value"

    # We'll add the next sequence value to the starting date and project the result
    date_add = _build_date_time_add(exp.DateAdd)(
        [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
    ).as_(sequence_value_name)

    # We use DATEDIFF to compute the number of sequence values needed
    number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
        [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
    )

    unnest.set("expressions", [number_sequence])
    unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))
 255
 256
def _transform_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Generator transform: rewrite GENERATE_DATE_ARRAY calls for Snowflake.

    Calls in scalar position are wrapped in an ARRAY_AGG subquery; calls that
    are the sole argument of an UNNEST in FROM/JOIN position are rewritten by
    ``_unnest_generate_date_array``.
    """
    if isinstance(expression, exp.Select):
        for generate_date_array in expression.find_all(exp.GenerateDateArray):
            parent = generate_date_array.parent

            # If GENERATE_DATE_ARRAY is used directly as an array (e.g passed into ARRAY_LENGTH), the transformed Snowflake
            # query is the following (it'll be unnested properly on the next iteration due to copy):
            # SELECT ref(GENERATE_DATE_ARRAY(...)) -> SELECT ref((SELECT ARRAY_AGG(*) FROM UNNEST(GENERATE_DATE_ARRAY(...))))
            if not isinstance(parent, exp.Unnest):
                unnest = exp.Unnest(expressions=[generate_date_array.copy()])
                generate_date_array.replace(
                    exp.select(exp.ArrayAgg(this=exp.Star())).from_(unnest).subquery()
                )

            if (
                isinstance(parent, exp.Unnest)
                and isinstance(parent.parent, (exp.From, exp.Join))
                and len(parent.expressions) == 1
            ):
                _unnest_generate_date_array(parent)

    return expression
 279
 280
 281def _build_regexp_extract(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 282    def _builder(args: t.List) -> E:
 283        return expr_type(
 284            this=seq_get(args, 0),
 285            expression=seq_get(args, 1),
 286            position=seq_get(args, 2),
 287            occurrence=seq_get(args, 3),
 288            parameters=seq_get(args, 4),
 289            group=seq_get(args, 5) or exp.Literal.number(0),
 290        )
 291
 292    return _builder
 293
 294
def _regexpextract_sql(self, expression: exp.RegexpExtract | exp.RegexpExtractAll) -> str:
    """Render RegexpExtract(All) as REGEXP_SUBSTR / REGEXP_EXTRACT_ALL.

    Snowflake's signature is positional, so any later argument that is set
    forces defaults for all earlier optional ones.
    """
    # Other dialects don't support all of the following parameters, so we need to
    # generate default values as necessary to ensure the transpilation is correct
    group = expression.args.get("group")

    # To avoid generating all these default values, we set group to None if
    # it's 0 (also default value) which doesn't trigger the following chain
    if group and group.name == "0":
        group = None

    # Each default only materializes if a later positional argument is present.
    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR" if isinstance(expression, exp.RegexpExtract) else "REGEXP_EXTRACT_ALL",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
 318
 319
def _json_extract_value_array_sql(
    self: Snowflake.Generator, expression: exp.JSONValueArray | exp.JSONExtractArray
) -> str:
    """Render JSONValueArray / JSONExtractArray as TRANSFORM over a JSON_EXTRACT.

    Each extracted element ``x`` is post-processed by a lambda: cast to
    VARCHAR for JSONValueArray, or re-parsed as JSON for JSONExtractArray.
    """
    json_extract = exp.JSONExtract(this=expression.this, expression=expression.expression)
    ident = exp.to_identifier("x")

    if isinstance(expression, exp.JSONValueArray):
        this: exp.Expression = exp.cast(ident, to=exp.DataType.Type.VARCHAR)
    else:
        # The call is embedded as raw text so ParseJSON renders TO_JSON(x) verbatim.
        this = exp.ParseJSON(this=f"TO_JSON({ident})")

    transform_lambda = exp.Lambda(expressions=[ident], this=this)

    return self.func("TRANSFORM", json_extract, transform_lambda)
 334
 335
 336class Snowflake(Dialect):
 337    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
 338    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
 339    NULL_ORDERING = "nulls_are_large"
 340    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 341    SUPPORTS_USER_DEFINED_TYPES = False
 342    SUPPORTS_SEMI_ANTI_JOIN = False
 343    PREFER_CTE_ALIAS_COLUMN = True
 344    TABLESAMPLE_SIZE_IS_PERCENT = True
 345    COPY_PARAMS_ARE_CSV = False
 346    ARRAY_AGG_INCLUDES_NULLS = None
 347
 348    TIME_MAPPING = {
 349        "YYYY": "%Y",
 350        "yyyy": "%Y",
 351        "YY": "%y",
 352        "yy": "%y",
 353        "MMMM": "%B",
 354        "mmmm": "%B",
 355        "MON": "%b",
 356        "mon": "%b",
 357        "MM": "%m",
 358        "mm": "%m",
 359        "DD": "%d",
 360        "dd": "%-d",
 361        "DY": "%a",
 362        "dy": "%w",
 363        "HH24": "%H",
 364        "hh24": "%H",
 365        "HH12": "%I",
 366        "hh12": "%I",
 367        "MI": "%M",
 368        "mi": "%M",
 369        "SS": "%S",
 370        "ss": "%S",
 371        "FF6": "%f",
 372        "ff6": "%f",
 373    }
 374
 375    DATE_PART_MAPPING = {
 376        **Dialect.DATE_PART_MAPPING,
 377        "ISOWEEK": "WEEKISO",
 378    }
 379
 380    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 381        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 382        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 383        if (
 384            isinstance(expression, exp.Identifier)
 385            and isinstance(expression.parent, exp.Table)
 386            and expression.name.lower() == "dual"
 387        ):
 388            return expression  # type: ignore
 389
 390        return super().quote_identifier(expression, identify=identify)
 391
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # Remove `$` from the single-character tokens; copy first so the
        # parent class's token table isn't mutated.
        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("$")
 395
 396    class Parser(parser.Parser):
 397        IDENTIFY_PIVOT_STRINGS = True
 398        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
 399        COLON_IS_VARIANT_EXTRACT = True
 400
 401        ID_VAR_TOKENS = {
 402            *parser.Parser.ID_VAR_TOKENS,
 403            TokenType.MATCH_CONDITION,
 404        }
 405
 406        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
 407        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 408
 409        COLON_PLACEHOLDER_TOKENS = ID_VAR_TOKENS | {TokenType.NUMBER}
 410
        # Snowflake-specific additions/overrides to the base parser's
        # function-name -> AST-builder mapping.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Snowflake's argument order is (array, value), i.e. swapped
            # relative to exp.ArrayContains
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "HEX_DECODE_BINARY": exp.Unhex.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_CHAR": build_timetostr_or_tochar,
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 491
 492        FUNCTION_PARSERS = {
 493            **parser.Parser.FUNCTION_PARSERS,
 494            "DATE_PART": lambda self: self._parse_date_part(),
 495            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
 496            "LISTAGG": lambda self: self._parse_string_agg(),
 497        }
 498        FUNCTION_PARSERS.pop("TRIM")
 499
 500        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
 501
 502        RANGE_PARSERS = {
 503            **parser.Parser.RANGE_PARSERS,
 504            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
 505            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
 506        }
 507
 508        ALTER_PARSERS = {
 509            **parser.Parser.ALTER_PARSERS,
 510            "UNSET": lambda self: self.expression(
 511                exp.Set,
 512                tag=self._match_text_seq("TAG"),
 513                expressions=self._parse_csv(self._parse_id_var),
 514                unset=True,
 515            ),
 516        }
 517
 518        STATEMENT_PARSERS = {
 519            **parser.Parser.STATEMENT_PARSERS,
 520            TokenType.GET: lambda self: self._parse_get(),
 521            TokenType.PUT: lambda self: self._parse_put(),
 522            TokenType.SHOW: lambda self: self._parse_show(),
 523        }
 524
 525        PROPERTY_PARSERS = {
 526            **parser.Parser.PROPERTY_PARSERS,
 527            "CREDENTIALS": lambda self: self._parse_credentials_property(),
 528            "FILE_FORMAT": lambda self: self._parse_file_format_property(),
 529            "LOCATION": lambda self: self._parse_location_property(),
 530            "TAG": lambda self: self._parse_tag(),
 531            "USING": lambda self: self._match_text_seq("TEMPLATE")
 532            and self.expression(exp.UsingTemplateProperty, this=self._parse_statement()),
 533        }
 534
 535        TYPE_CONVERTERS = {
 536            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
 537            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
 538        }
 539
 540        SHOW_PARSERS = {
 541            "DATABASES": _show_parser("DATABASES"),
 542            "TERSE DATABASES": _show_parser("DATABASES"),
 543            "SCHEMAS": _show_parser("SCHEMAS"),
 544            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
 545            "OBJECTS": _show_parser("OBJECTS"),
 546            "TERSE OBJECTS": _show_parser("OBJECTS"),
 547            "TABLES": _show_parser("TABLES"),
 548            "TERSE TABLES": _show_parser("TABLES"),
 549            "VIEWS": _show_parser("VIEWS"),
 550            "TERSE VIEWS": _show_parser("VIEWS"),
 551            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 552            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 553            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 554            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 555            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 556            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 557            "SEQUENCES": _show_parser("SEQUENCES"),
 558            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
 559            "STAGES": _show_parser("STAGES"),
 560            "COLUMNS": _show_parser("COLUMNS"),
 561            "USERS": _show_parser("USERS"),
 562            "TERSE USERS": _show_parser("USERS"),
 563            "FILE FORMATS": _show_parser("FILE FORMATS"),
 564            "FUNCTIONS": _show_parser("FUNCTIONS"),
 565            "PROCEDURES": _show_parser("PROCEDURES"),
 566            "WAREHOUSES": _show_parser("WAREHOUSES"),
 567        }
 568
 569        CONSTRAINT_PARSERS = {
 570            **parser.Parser.CONSTRAINT_PARSERS,
 571            "WITH": lambda self: self._parse_with_constraint(),
 572            "MASKING": lambda self: self._parse_with_constraint(),
 573            "PROJECTION": lambda self: self._parse_with_constraint(),
 574            "TAG": lambda self: self._parse_with_constraint(),
 575        }
 576
 577        STAGED_FILE_SINGLE_TOKENS = {
 578            TokenType.DOT,
 579            TokenType.MOD,
 580            TokenType.SLASH,
 581        }
 582
 583        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
 584
 585        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
 586
 587        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
 588
 589        LAMBDAS = {
 590            **parser.Parser.LAMBDAS,
 591            TokenType.ARROW: lambda self, expressions: self.expression(
 592                exp.Lambda,
 593                this=self._replace_lambda(
 594                    self._parse_assignment(),
 595                    expressions,
 596                ),
 597                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
 598            ),
 599        }
 600
        def _parse_use(self) -> exp.Use:
            # Handles `USE SECONDARY ROLES { ALL | NONE | <role> [, ...] }`;
            # every other USE form is delegated to the base parser.
            if self._match_text_seq("SECONDARY", "ROLES"):
                this = self._match_texts(("ALL", "NONE")) and exp.var(self._prev.text.upper())
                roles = None if this else self._parse_csv(lambda: self._parse_table(schema=False))
                return self.expression(
                    exp.Use, kind="SECONDARY ROLES", this=this, expressions=roles
                )

            return super()._parse_use()
 610
        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a range expression, special-casing `NOT IN (subquery)`."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a SnowFlake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)
 630
 631        def _parse_tag(self) -> exp.Tags:
 632            return self.expression(
 633                exp.Tags,
 634                expressions=self._parse_wrapped_csv(self._parse_property),
 635            )
 636
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraints."""
            # The WITH keyword is optional: if the previous token wasn't WITH,
            # we arrived here on the constraint keyword itself, so step back
            # one token for the matches below to consume it.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # A qualified policy name parses as a Column; normalize it to a Dot.
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None
 659
 660        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
 661            if self._match(TokenType.TAG):
 662                return self._parse_tag()
 663
 664            return super()._parse_with_property()
 665
        def _parse_create(self) -> exp.Create | exp.Command:
            expression = super()._parse_create()
            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
                # Replace the Table node with the enclosed Identifier, since
                # these kinds (TAG, WAREHOUSE, ...) don't name tables.
                expression.this.replace(expression.this.this)

            return expression
 673
 674        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 675        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 676        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 677            this = self._parse_var() or self._parse_type()
 678
 679            if not this:
 680                return None
 681
 682            self._match(TokenType.COMMA)
 683            expression = self._parse_bitwise()
 684            this = map_date_part(this)
 685            name = this.name.upper()
 686
 687            if name.startswith("EPOCH"):
 688                if name == "EPOCH_MILLISECOND":
 689                    scale = 10**3
 690                elif name == "EPOCH_MICROSECOND":
 691                    scale = 10**6
 692                elif name == "EPOCH_NANOSECOND":
 693                    scale = 10**9
 694                else:
 695                    scale = None
 696
 697                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 698                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 699
 700                if scale:
 701                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 702
 703                return to_unix
 704
 705            return self.expression(exp.Extract, this=this, expression=expression)
 706
 707        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 708            if is_map:
 709                # Keys are strings in Snowflake's objects, see also:
 710                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 711                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 712                return self._parse_slice(self._parse_string())
 713
 714            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 715
 716        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 717            lateral = super()._parse_lateral()
 718            if not lateral:
 719                return lateral
 720
 721            if isinstance(lateral.this, exp.Explode):
 722                table_alias = lateral.args.get("alias")
 723                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 724                if table_alias and not table_alias.args.get("columns"):
 725                    table_alias.set("columns", columns)
 726                elif not table_alias:
 727                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 728
 729            return lateral
 730
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            A staged file can be a quoted path (string token) or an unquoted
            `@stage/path` reference; either may carry parenthesized options.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                # Quoted staged file path, e.g. '@mystage/path/file.csv'
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                # Unquoted stage reference, e.g. @mystage/path
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional options list: (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT is either a string literal or a named format reference
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        # Unknown option: stop consuming so the caller can handle the rest
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                # Not a staged file: regular table name resolution
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 764
 765        def _parse_table(
 766            self,
 767            schema: bool = False,
 768            joins: bool = False,
 769            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
 770            parse_bracket: bool = False,
 771            is_db_reference: bool = False,
 772            parse_partition: bool = False,
 773        ) -> t.Optional[exp.Expression]:
 774            table = super()._parse_table(
 775                schema=schema,
 776                joins=joins,
 777                alias_tokens=alias_tokens,
 778                parse_bracket=parse_bracket,
 779                is_db_reference=is_db_reference,
 780                parse_partition=parse_partition,
 781            )
 782            if isinstance(table, exp.Table) and isinstance(table.this, exp.TableFromRows):
 783                table_from_rows = table.this
 784                for arg in exp.TableFromRows.arg_types:
 785                    if arg != "this":
 786                        table_from_rows.set(arg, table.args.get(arg))
 787
 788                table = table_from_rows
 789
 790            return table
 791
 792        def _parse_id_var(
 793            self,
 794            any_token: bool = True,
 795            tokens: t.Optional[t.Collection[TokenType]] = None,
 796        ) -> t.Optional[exp.Expression]:
 797            if self._match_text_seq("IDENTIFIER", "("):
 798                identifier = (
 799                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 800                    or self._parse_string()
 801                )
 802                self._match_r_paren()
 803                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 804
 805            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 806
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW command into an exp.Show node."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_text_seq("CLASS"):
                    scope_kind = "CLASS"
                    scope = self._parse_table_parts()
                elif self._match_text_seq("APPLICATION"):
                    scope_kind = "APPLICATION"
                    if self._match_text_seq("PACKAGE"):
                        scope_kind += " PACKAGE"
                    scope = self._parse_table_parts()
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit scope keyword: infer SCHEMA vs TABLE from the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            # NOTE: the kwargs below are evaluated in order, so the trailing
            # STARTS WITH, LIMIT, FROM and WITH PRIVILEGES clauses are consumed
            # sequentially from the token stream right here.
            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                    "privileges": self._match_text_seq("WITH", "PRIVILEGES")
                    and self._parse_csv(lambda: self._parse_var(any_token=True, upper=True)),
                },
            )
 854
 855        def _parse_put(self) -> exp.Put | exp.Command:
 856            if self._curr.token_type != TokenType.STRING:
 857                return self._parse_as_command(self._prev)
 858
 859            return self.expression(
 860                exp.Put,
 861                this=self._parse_string(),
 862                target=self._parse_location_path(),
 863                properties=self._parse_properties(),
 864            )
 865
 866        def _parse_get(self) -> exp.Get | exp.Command:
 867            start = self._prev
 868            target = self._parse_location_path()
 869
 870            # Parse as command if unquoted file path
 871            if self._curr.token_type == TokenType.URI_START:
 872                return self._parse_as_command(start)
 873
 874            return self.expression(
 875                exp.Get,
 876                this=self._parse_string(),
 877                target=target,
 878                properties=self._parse_properties(),
 879            )
 880
 881        def _parse_location_property(self) -> exp.LocationProperty:
 882            self._match(TokenType.EQ)
 883            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 884
 885        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 886            # Parse either a subquery or a staged file
 887            return (
 888                self._parse_select(table=True, parse_subquery_alias=False)
 889                if self._match(TokenType.L_PAREN, advance=False)
 890                else self._parse_table_parts()
 891            )
 892
        def _parse_location_path(self) -> exp.Var:
            """Consume a stage/file path (e.g. @stage/some/path) as one Var token run."""
            start = self._curr
            self._advance_any(ignore_reserved=True)

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                self._advance_any(ignore_reserved=True)

            # Reconstruct the raw SQL text spanning the first through last consumed token
            return exp.var(self._find_sql(start, self._prev))
 906
 907        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 908            this = super()._parse_lambda_arg()
 909
 910            if not this:
 911                return this
 912
 913            typ = self._parse_types()
 914
 915            if typ:
 916                return self.expression(exp.Cast, this=this, to=typ)
 917
 918            return this
 919
 920        def _parse_foreign_key(self) -> exp.ForeignKey:
 921            # inlineFK, the REFERENCES columns are implied
 922            if self._match(TokenType.REFERENCES, advance=False):
 923                return self.expression(exp.ForeignKey)
 924
 925            # outoflineFK, explicitly names the columns
 926            return super()._parse_foreign_key()
 927
 928        def _parse_file_format_property(self) -> exp.FileFormatProperty:
 929            self._match(TokenType.EQ)
 930            if self._match(TokenType.L_PAREN, advance=False):
 931                expressions = self._parse_wrapped_options()
 932            else:
 933                expressions = [self._parse_format_name()]
 934
 935            return self.expression(
 936                exp.FileFormatProperty,
 937                expressions=expressions,
 938            )
 939
 940        def _parse_credentials_property(self) -> exp.CredentialsProperty:
 941            return self.expression(
 942                exp.CredentialsProperty,
 943                expressions=self._parse_wrapped_options(),
 944            )
 945
    class Tokenizer(tokens.Tokenizer):
        # Both a backslash and a doubled single quote escape inside string literals
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "FILE://": TokenType.URI_START,
            "BYTEINT": TokenType.INT,
            "EXCLUDE": TokenType.EXCEPT,
            "FILE FORMAT": TokenType.FILE_FORMAT,
            "GET": TokenType.GET,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.PUT,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STAGE": TokenType.STAGE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no /*+ ... */ hint syntax, so don't tokenize it specially
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces parameters (e.g. $1) and session variables
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW output is parsed into an exp.Show rather than kept as a raw command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 990
    class Generator(generator.Generator):
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Mapping of sqlglot AST nodes to Snowflake SQL renderers
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # NOTE: ARRAY_CONTAINS takes (value, array) — arguments are swapped
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfWeekIso: rename_func("DAYOFWEEKISO"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: lambda self, e: self.func(
                "DATE_PART", map_date_part(e.this, self.dialect), e.expression
            ),
            exp.FileFormatProperty: lambda self,
            e: f"FILE_FORMAT=({self.expressions(e, 'expressions', sep=' ')})",
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end bound is exclusive, hence the +1
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, sep=""),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
            exp.LocationProperty: lambda self, e: f"LOCATION={self.sql(e, 'this')}",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_window_clause,
                    transforms.eliminate_distinct_on,
                    transforms.explode_projection_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: strposition_sql(
                self, e, func_name="CHARINDEX", supports_position=True
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF argument order differs from the AST's (this, expression)
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.TsOrDsToTime: lambda self, e: self.func(
                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
            ),
            exp.Unhex: rename_func("HEX_DECODE_BINARY"),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        # JSON path components Snowflake can express natively
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            exp.DataType.Type.BIGDECIMAL: "DOUBLE",
        }

        TOKEN_MAPPING = {
            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.CredentialsProperty: exp.Properties.Location.POST_WITH,
            exp.LocationProperty: exp.Properties.Location.POST_WITH,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Node types that can't appear inside a VALUES clause (see values_sql)
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS = (exp.ArrayAgg,)
1164
1165        def with_properties(self, properties: exp.Properties) -> str:
1166            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1167
1168        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1169            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1170                values_as_table = False
1171
1172            return super().values_sql(expression, values_as_table=values_as_table)
1173
1174        def datatype_sql(self, expression: exp.DataType) -> str:
1175            expressions = expression.expressions
1176            if (
1177                expressions
1178                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1179                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1180            ):
1181                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1182                return "OBJECT"
1183
1184            return super().datatype_sql(expression)
1185
1186        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1187            return self.func(
1188                "TO_NUMBER",
1189                expression.this,
1190                expression.args.get("format"),
1191                expression.args.get("precision"),
1192                expression.args.get("scale"),
1193            )
1194
1195        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
1196            milli = expression.args.get("milli")
1197            if milli is not None:
1198                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
1199                expression.set("nano", milli_to_nano)
1200
1201            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1202
1203        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1204            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1205                return self.func("TO_GEOGRAPHY", expression.this)
1206            if expression.is_type(exp.DataType.Type.GEOMETRY):
1207                return self.func("TO_GEOMETRY", expression.this)
1208
1209            return super().cast_sql(expression, safe_prefix=safe_prefix)
1210
1211        def trycast_sql(self, expression: exp.TryCast) -> str:
1212            value = expression.this
1213
1214            if value.type is None:
1215                from sqlglot.optimizer.annotate_types import annotate_types
1216
1217                value = annotate_types(value, dialect=self.dialect)
1218
1219            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
1220                return super().trycast_sql(expression)
1221
1222            # TRY_CAST only works for string values in Snowflake
1223            return self.cast_sql(expression)
1224
1225        def log_sql(self, expression: exp.Log) -> str:
1226            if not expression.expression:
1227                return self.func("LN", expression.this)
1228
1229            return super().log_sql(expression)
1230
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)) construct.

            FLATTEN produces a fixed six-column result (seq, key, path, index,
            value, this), so the user's alias/offset columns are mapped onto the
            matching FLATTEN output positions.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # The user's first alias column (if any) names FLATTEN's `value` column
            unnest_alias_columns = unnest_alias.columns if unnest_alias else []
            value = seq_get(unnest_alias_columns, 0) or exp.to_identifier("value")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # WITH OFFSET maps onto FLATTEN's `index` column
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                value,
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            # Outside FROM/JOIN, project the value column explicitly
            value = "" if isinstance(expression.parent, (exp.From, exp.Join)) else f"{value} FROM "

            return f"{value}{explode}{alias}"
1262
        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW command from an exp.Show node.

            Each optional clause contributes its own leading space so the final
            f-string concatenation stays correctly spaced.
            """
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            privileges = self.expressions(expression, key="privileges", flat=True)
            privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"
1290
1291        def describe_sql(self, expression: exp.Describe) -> str:
1292            # Default to table if kind is unknown
1293            kind_value = expression.args.get("kind") or "TABLE"
1294            kind = f" {kind_value}" if kind_value else ""
1295            this = f" {self.sql(expression, 'this')}"
1296            expressions = self.expressions(expression, flat=True)
1297            expressions = f" {expressions}" if expressions else ""
1298            return f"DESCRIBE{kind}{this}{expressions}"
1299
1300        def generatedasidentitycolumnconstraint_sql(
1301            self, expression: exp.GeneratedAsIdentityColumnConstraint
1302        ) -> str:
1303            start = expression.args.get("start")
1304            start = f" START {start}" if start else ""
1305            increment = expression.args.get("increment")
1306            increment = f" INCREMENT {increment}" if increment else ""
1307            return f"AUTOINCREMENT{start}{increment}"
1308
1309        def cluster_sql(self, expression: exp.Cluster) -> str:
1310            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1311
1312        def struct_sql(self, expression: exp.Struct) -> str:
1313            keys = []
1314            values = []
1315
1316            for i, e in enumerate(expression.expressions):
1317                if isinstance(e, exp.PropertyEQ):
1318                    keys.append(
1319                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1320                    )
1321                    values.append(e.expression)
1322                else:
1323                    keys.append(exp.Literal.string(f"_{i}"))
1324                    values.append(e)
1325
1326            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1327
1328        @unsupported_args("weight", "accuracy")
1329        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1330            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1331
1332        def alterset_sql(self, expression: exp.AlterSet) -> str:
1333            exprs = self.expressions(expression, flat=True)
1334            exprs = f" {exprs}" if exprs else ""
1335            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1336            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1337            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1338            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1339            tag = self.expressions(expression, key="tag", flat=True)
1340            tag = f" TAG {tag}" if tag else ""
1341
1342            return f"SET{exprs}{file_format}{copy_options}{tag}"
1343
        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render TO_TIMESTAMP, or TRY_TO_TIMESTAMP when the "safe" flag is set."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )
1349
        def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
            """Render timestamp subtraction as TIMESTAMPADD with a negated amount."""
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )
1358
        def jsonextract_sql(self, expression: exp.JSONExtract) -> str:
            """Render JSON extraction as GET_PATH, parsing string operands first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ,
            # so wrap them in PARSE_JSON before applying the path
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )
1368
1369        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1370            this = expression.this
1371            if not isinstance(this, exp.TsOrDsToTimestamp):
1372                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1373
1374            return self.func("TO_CHAR", this, self.format_time(expression))
1375
1376        def datesub_sql(self, expression: exp.DateSub) -> str:
1377            value = expression.expression
1378            if value:
1379                value.replace(value * (-1))
1380            else:
1381                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")
1382
1383            return date_delta_sql("DATEADD")(self, expression)
1384
        def select_sql(self, expression: exp.Select) -> str:
            """Render SELECT, injecting LIMIT NULL when only an OFFSET is present.

            NOTE(review): presumably Snowflake requires a LIMIT clause for OFFSET
            to be valid, and `LIMIT NULL` expresses "no limit".
            """
            limit = expression.args.get("limit")
            offset = expression.args.get("offset")
            if offset and not limit:
                expression.limit(exp.Null(), copy=False)
            return super().select_sql(expression)
1391
        def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
            """Render the createable part of CREATE, special-casing materialized views."""
            is_materialized = expression.find(exp.MaterializedProperty)
            copy_grants_property = expression.find(exp.CopyGrantsProperty)

            if expression.kind == "VIEW" and is_materialized and copy_grants_property:
                # For materialized views, COPY GRANTS is located *before* the columns list
                # This is in contrast to normal views where COPY GRANTS is located *after* the columns list
                # We default CopyGrantsProperty to POST_SCHEMA which means we need to output it POST_NAME if a materialized view is detected
                # ref: https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view#syntax
                # ref: https://docs.snowflake.com/en/sql-reference/sql/create-view#syntax
                post_schema_properties = locations[exp.Properties.Location.POST_SCHEMA]
                # Remove it from POST_SCHEMA so it isn't rendered twice
                post_schema_properties.pop(post_schema_properties.index(copy_grants_property))

                this_name = self.sql(expression.this, "this")
                copy_grants = self.sql(copy_grants_property)
                this_schema = self.schema_columns_sql(expression.this)
                this_schema = f"{self.sep()}{this_schema}" if this_schema else ""

                # <name> COPY GRANTS (<columns>)
                return f"{this_name}{self.sep()}{copy_grants}{this_schema}"

            return super().createable_sql(expression, locations)
1413
1414        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
1415            this = expression.this
1416
1417            # If an ORDER BY clause is present, we need to remove it from ARRAY_AGG
1418            # and add it later as part of the WITHIN GROUP clause
1419            order = this if isinstance(this, exp.Order) else None
1420            if order:
1421                expression.set("this", order.this.pop())
1422
1423            expr_sql = super().arrayagg_sql(expression)
1424
1425            if order:
1426                expr_sql = self.sql(exp.WithinGroup(this=expr_sql, expression=order))
1427
1428            return expr_sql
1429
        def array_sql(self, expression: exp.Array) -> str:
            """Render an array literal, rewriting ARRAY(SELECT AS STRUCT ...) subqueries.

            A `SELECT AS STRUCT` projection (e.g. from BigQuery) is rewritten into
            a subquery that aggregates OBJECT_CONSTRUCT rows with ARRAY_AGG.
            """
            expressions = expression.expressions

            first_expr = seq_get(expressions, 0)
            if isinstance(first_expr, exp.Select):
                # SELECT AS STRUCT foo AS alias_foo -> ARRAY_AGG(OBJECT_CONSTRUCT('alias_foo', foo))
                if first_expr.text("kind").upper() == "STRUCT":
                    object_construct_args = []
                    for expr in first_expr.expressions:
                        # Alias case: SELECT AS STRUCT foo AS alias_foo -> OBJECT_CONSTRUCT('alias_foo', foo)
                        # Column case: SELECT AS STRUCT foo -> OBJECT_CONSTRUCT('foo', foo)
                        name = expr.this if isinstance(expr, exp.Alias) else expr

                        object_construct_args.extend([exp.Literal.string(expr.alias_or_name), name])

                    array_agg = exp.ArrayAgg(
                        this=_build_object_construct(args=object_construct_args)
                    )

                    # Replace the SELECT's projections with the single aggregate
                    first_expr.set("kind", None)
                    first_expr.set("expressions", [array_agg])

                    return self.sql(first_expr.subquery())

            return inline_array_sql(self, expression)
class Snowflake(sqlglot.dialects.dialect.Dialect):
class Snowflake(Dialect):
    """Dialect definition for Snowflake SQL."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    # Unquoted identifiers fold to uppercase in Snowflake.
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    # In Snowflake, TABLESAMPLE (n) is a percentage, not a row count.
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake format elements mapped to their strftime-style equivalents.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF6": "%f",
        "ff6": "%f",
    }

    # Snowflake spells the ISO week date part "WEEKISO".
    DATE_PART_MAPPING = {
        **Dialect.DATE_PART_MAPPING,
        "ISOWEEK": "WEEKISO",
    }
 381    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 382        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 383        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 384        if (
 385            isinstance(expression, exp.Identifier)
 386            and isinstance(expression.parent, exp.Table)
 387            and expression.name.lower() == "dual"
 388        ):
 389            return expression  # type: ignore
 390
 391        return super().quote_identifier(expression, identify=identify)
 392
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # Drop "$" from the single-character JSON path tokens; Snowflake path
        # expressions are not written with a leading "$" root marker.
        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("$")
 396
    class Parser(parser.Parser):
        """Snowflake-specific parser.

        Extends the base parser with Snowflake syntax: staged file references
        (``@stage`` paths), SHOW/PUT/GET statements, ``IDENTIFIER(...)``
        indirection, typed lambda arguments, and a large set of Snowflake
        function-name mappings.
        """

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        # MATCH_CONDITION may appear as an identifier...
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # ...but not as a table alias; WINDOW, however, is a valid alias here.
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        COLON_PLACEHOLDER_TOKENS = ID_VAR_TOKENS | {TokenType.NUMBER}

        # Snowflake function names mapped to sqlglot expression builders.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "HEX_DECODE_BINARY": exp.Unhex.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_CHAR": build_timetostr_or_tochar,
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
            "LISTAGG": lambda self: self._parse_string_agg(),
        }
        # TRIM is parsed as an ordinary function in Snowflake, not with the
        # standard TRIM([LEADING|TRAILING|BOTH] ... FROM ...) syntax.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] a, b, c
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.GET: lambda self: self._parse_get(),
            TokenType.PUT: lambda self: self._parse_put(),
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "CREDENTIALS": lambda self: self._parse_credentials_property(),
            "FILE_FORMAT": lambda self: self._parse_file_format_property(),
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
            "USING": lambda self: self._match_text_seq("TEMPLATE")
            and self.expression(exp.UsingTemplateProperty, this=self._parse_statement()),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        SHOW_PARSERS = {
            "DATABASES": _show_parser("DATABASES"),
            "TERSE DATABASES": _show_parser("DATABASES"),
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "STAGES": _show_parser("STAGES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
            "FILE FORMATS": _show_parser("FILE FORMATS"),
            "FUNCTIONS": _show_parser("FUNCTIONS"),
            "PROCEDURES": _show_parser("PROCEDURES"),
            "WAREHOUSES": _show_parser("WAREHOUSES"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-character tokens that may appear inside a staged file path.
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default column names produced by LATERAL FLATTEN(...).
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN <scope> defaults to a schema rather than a table.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is a bare identifier, not a table reference.
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            # Typed lambda parameters (e.g. `x INT`) are parsed as casts by
            # _parse_lambda_arg; strip the cast for the parameter list itself.
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }

        def _parse_use(self) -> exp.Use:
            """Parse USE, adding support for USE SECONDARY ROLES ALL | NONE | <role list>."""
            if self._match_text_seq("SECONDARY", "ROLES"):
                this = self._match_texts(("ALL", "NONE")) and exp.var(self._prev.text.upper())
                roles = None if this else self._parse_csv(lambda: self._parse_table(schema=False))
                return self.expression(
                    exp.Use, kind="SECONDARY ROLES", this=this, expressions=roles
                )

            return super()._parse_use()

        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a range expression, rewriting NOT IN (subquery) to <> ALL (subquery)."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a SnowFlake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)

        def _parse_tag(self) -> exp.Tags:
            """Parse a parenthesized, comma-separated TAG property list."""
            return self.expression(
                exp.Tags,
                expressions=self._parse_wrapped_csv(self._parse_property),
            )

        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraints."""
            # The WITH keyword is optional; if we matched on MASKING/PROJECTION/TAG
            # directly, step back so the text sequence matches below can consume it.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None

        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
            """Parse WITH TAG (...) as a property; defer everything else to the base parser."""
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return super()._parse_with_property()

        def _parse_create(self) -> exp.Create | exp.Command:
            """Parse CREATE, unwrapping the target for non-table creatables."""
            expression = super()._parse_create()
            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
                # Replace the Table node with the enclosed Identifier
                expression.this.replace(expression.this.this)

            return expression

        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), rewriting EPOCH_* parts to unix-time math."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # EPOCH_{MILLI,MICRO,NANO}SECOND -> TIME_TO_UNIX(...) * scale
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse one key/value inside brackets; object keys must be string literals."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's default output columns to the alias."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged files and string paths.

            Staged-file references may carry (FILE_FORMAT => ..., PATTERN => ...)
            named arguments in parentheses.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT accepts either a string or a format name.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
        ) -> t.Optional[exp.Expression]:
            """Parse a table, hoisting TABLE(...) wrappers into TableFromRows."""
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
                parse_partition=parse_partition,
            )
            if isinstance(table, exp.Table) and isinstance(table.this, exp.TableFromRows):
                # Move the Table's own args (alias, sample, etc.) onto TableFromRows
                # and use it directly, dropping the enclosing Table node.
                table_from_rows = table.this
                for arg in exp.TableFromRows.arg_types:
                    if arg != "this":
                        table_from_rows.set(arg, table.args.get(arg))

                table = table_from_rows

            return table

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting IDENTIFIER(<name or string>) indirection."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW <kind> statement."""
            scope = None
            scope_kind = None

            # this identifies SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_text_seq("CLASS"):
                    scope_kind = "CLASS"
                    scope = self._parse_table_parts()
                elif self._match_text_seq("APPLICATION"):
                    scope_kind = "APPLICATION"
                    if self._match_text_seq("PACKAGE"):
                        scope_kind += " PACKAGE"
                    scope = self._parse_table_parts()
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                    "privileges": self._match_text_seq("WITH", "PRIVILEGES")
                    and self._parse_csv(lambda: self._parse_var(any_token=True, upper=True)),
                },
            )

        def _parse_put(self) -> exp.Put | exp.Command:
            """Parse PUT '<file uri>' @<stage path> [properties], or fall back to a raw command."""
            # NOTE(review): assumes a token follows PUT; self._curr could be None at
            # end of input — confirm upstream guards this.
            if self._curr.token_type != TokenType.STRING:
                return self._parse_as_command(self._prev)

            return self.expression(
                exp.Put,
                this=self._parse_string(),
                target=self._parse_location_path(),
                properties=self._parse_properties(),
            )

        def _parse_get(self) -> exp.Get | exp.Command:
            """Parse GET @<stage path> '<file uri>' [properties], or fall back to a raw command."""
            start = self._prev
            target = self._parse_location_path()

            # Parse as command if unquoted file path
            # NOTE(review): assumes a token follows the path; self._curr could be
            # None at end of input — confirm upstream guards this.
            if self._curr.token_type == TokenType.URI_START:
                return self._parse_as_command(start)

            return self.expression(
                exp.Get,
                this=self._parse_string(),
                target=target,
                properties=self._parse_properties(),
            )

        def _parse_location_property(self) -> exp.LocationProperty:
            """Parse LOCATION [=] <stage path>."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_file_location(self) -> t.Optional[exp.Expression]:
            """Parse a COPY file location: a subquery or a staged file reference."""
            # Parse either a subquery or a staged file
            return (
                self._parse_select(table=True, parse_subquery_alias=False)
                if self._match(TokenType.L_PAREN, advance=False)
                else self._parse_table_parts()
            )

        def _parse_location_path(self) -> exp.Var:
            """Consume a raw stage/file path (e.g. @db.schema.stage/dir) into a Var."""
            start = self._curr
            self._advance_any(ignore_reserved=True)

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                self._advance_any(ignore_reserved=True)

            return exp.var(self._find_sql(start, self._prev))

        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
            """Parse a lambda parameter, wrapping a trailing type (e.g. `x INT`) as a Cast."""
            this = super()._parse_lambda_arg()

            if not this:
                return this

            typ = self._parse_types()

            if typ:
                return self.expression(exp.Cast, this=this, to=typ)

            return this

        def _parse_foreign_key(self) -> exp.ForeignKey:
            """Parse a FOREIGN KEY constraint, allowing the inline form with implied columns."""
            # inline FK: the REFERENCES columns are implied
            if self._match(TokenType.REFERENCES, advance=False):
                return self.expression(exp.ForeignKey)

            # out-of-line FK: explicitly names the columns
            return super()._parse_foreign_key()

        def _parse_file_format_property(self) -> exp.FileFormatProperty:
            """Parse FILE_FORMAT [=] (<options>) or FILE_FORMAT [=] <format name>."""
            self._match(TokenType.EQ)
            if self._match(TokenType.L_PAREN, advance=False):
                expressions = self._parse_wrapped_options()
            else:
                expressions = [self._parse_format_name()]

            return self.expression(
                exp.FileFormatProperty,
                expressions=expressions,
            )

        def _parse_credentials_property(self) -> exp.CredentialsProperty:
            """Parse CREDENTIALS = (<key> = <value> ...)."""
            return self.expression(
                exp.CredentialsProperty,
                expressions=self._parse_wrapped_options(),
            )
 946
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer settings."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "FILE://": TokenType.URI_START,
            "BYTEINT": TokenType.INT,
            "EXCLUDE": TokenType.EXCEPT,
            "FILE FORMAT": TokenType.FILE_FORMAT,
            "GET": TokenType.GET,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.PUT,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STAGE": TokenType.STAGE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # "/*+" hint comments are not treated specially in Snowflake.
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # "$" introduces parameters (e.g. $1) rather than being an identifier char.
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement here, not consumed as a raw command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 991
    class Generator(generator.Generator):
        """Snowflake-specific SQL generator settings (feature flags and tokens)."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        # COLLATE is spelled as a function call in Snowflake.
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # SELECT * EXCEPT (...) is spelled EXCLUDE in Snowflake.
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"
1015
1016        TRANSFORMS = {
1017            **generator.Generator.TRANSFORMS,
1018            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
1019            exp.ArgMax: rename_func("MAX_BY"),
1020            exp.ArgMin: rename_func("MIN_BY"),
1021            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
1022            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
1023            exp.AtTimeZone: lambda self, e: self.func(
1024                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
1025            ),
1026            exp.BitwiseOr: rename_func("BITOR"),
1027            exp.BitwiseXor: rename_func("BITXOR"),
1028            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
1029            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
1030            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
1031            exp.DateAdd: date_delta_sql("DATEADD"),
1032            exp.DateDiff: date_delta_sql("DATEDIFF"),
1033            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
1034            exp.DatetimeDiff: timestampdiff_sql,
1035            exp.DateStrToDate: datestrtodate_sql,
1036            exp.DayOfMonth: rename_func("DAYOFMONTH"),
1037            exp.DayOfWeek: rename_func("DAYOFWEEK"),
1038            exp.DayOfWeekIso: rename_func("DAYOFWEEKISO"),
1039            exp.DayOfYear: rename_func("DAYOFYEAR"),
1040            exp.Explode: rename_func("FLATTEN"),
1041            exp.Extract: lambda self, e: self.func(
1042                "DATE_PART", map_date_part(e.this, self.dialect), e.expression
1043            ),
1044            exp.FileFormatProperty: lambda self,
1045            e: f"FILE_FORMAT=({self.expressions(e, 'expressions', sep=' ')})",
1046            exp.FromTimeZone: lambda self, e: self.func(
1047                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
1048            ),
1049            exp.GenerateSeries: lambda self, e: self.func(
1050                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
1051            ),
1052            exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, sep=""),
1053            exp.If: if_sql(name="IFF", false_value="NULL"),
1054            exp.JSONExtractArray: _json_extract_value_array_sql,
1055            exp.JSONExtractScalar: lambda self, e: self.func(
1056                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
1057            ),
1058            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
1059            exp.JSONPathRoot: lambda *_: "",
1060            exp.JSONValueArray: _json_extract_value_array_sql,
1061            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
1062                rename_func("EDITDISTANCE")
1063            ),
1064            exp.LocationProperty: lambda self, e: f"LOCATION={self.sql(e, 'this')}",
1065            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
1066            exp.LogicalOr: rename_func("BOOLOR_AGG"),
1067            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
1068            exp.MakeInterval: no_make_interval_sql,
1069            exp.Max: max_or_greatest,
1070            exp.Min: min_or_least,
1071            exp.ParseJSON: lambda self, e: self.func(
1072                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
1073            ),
1074            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
1075            exp.PercentileCont: transforms.preprocess(
1076                [transforms.add_within_group_for_percentiles]
1077            ),
1078            exp.PercentileDisc: transforms.preprocess(
1079                [transforms.add_within_group_for_percentiles]
1080            ),
1081            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
1082            exp.RegexpExtract: _regexpextract_sql,
1083            exp.RegexpExtractAll: _regexpextract_sql,
1084            exp.RegexpILike: _regexpilike_sql,
1085            exp.Rand: rename_func("RANDOM"),
1086            exp.Select: transforms.preprocess(
1087                [
1088                    transforms.eliminate_window_clause,
1089                    transforms.eliminate_distinct_on,
1090                    transforms.explode_projection_to_unnest(),
1091                    transforms.eliminate_semi_and_anti_joins,
1092                    _transform_generate_date_array,
1093                ]
1094            ),
1095            exp.SHA: rename_func("SHA1"),
1096            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
1097            exp.StartsWith: rename_func("STARTSWITH"),
1098            exp.StrPosition: lambda self, e: strposition_sql(
1099                self, e, func_name="CHARINDEX", supports_position=True
1100            ),
1101            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
1102            exp.Stuff: rename_func("INSERT"),
1103            exp.TimeAdd: date_delta_sql("TIMEADD"),
1104            exp.Timestamp: no_timestamp_sql,
1105            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
1106            exp.TimestampDiff: lambda self, e: self.func(
1107                "TIMESTAMPDIFF", e.unit, e.expression, e.this
1108            ),
1109            exp.TimestampTrunc: timestamptrunc_sql(),
1110            exp.TimeStrToTime: timestrtotime_sql,
1111            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
1112            exp.ToArray: rename_func("TO_ARRAY"),
1113            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
1114            exp.ToDouble: rename_func("TO_DOUBLE"),
1115            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
1116            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
1117            exp.TsOrDsToDate: lambda self, e: self.func(
1118                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
1119            ),
1120            exp.TsOrDsToTime: lambda self, e: self.func(
1121                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
1122            ),
1123            exp.Unhex: rename_func("HEX_DECODE_BINARY"),
1124            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
1125            exp.Uuid: rename_func("UUID_STRING"),
1126            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
1127            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
1128            exp.Xor: rename_func("BOOLXOR"),
1129        }
1130
1131        SUPPORTED_JSON_PATH_PARTS = {
1132            exp.JSONPathKey,
1133            exp.JSONPathRoot,
1134            exp.JSONPathSubscript,
1135        }
1136
1137        TYPE_MAPPING = {
1138            **generator.Generator.TYPE_MAPPING,
1139            exp.DataType.Type.NESTED: "OBJECT",
1140            exp.DataType.Type.STRUCT: "OBJECT",
1141            exp.DataType.Type.BIGDECIMAL: "DOUBLE",
1142        }
1143
1144        TOKEN_MAPPING = {
1145            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
1146        }
1147
1148        PROPERTIES_LOCATION = {
1149            **generator.Generator.PROPERTIES_LOCATION,
1150            exp.CredentialsProperty: exp.Properties.Location.POST_WITH,
1151            exp.LocationProperty: exp.Properties.Location.POST_WITH,
1152            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
1153            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
1154            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
1155        }
1156
1157        UNSUPPORTED_VALUES_EXPRESSIONS = {
1158            exp.Map,
1159            exp.StarMap,
1160            exp.Struct,
1161            exp.VarMap,
1162        }
1163
1164        RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS = (exp.ArrayAgg,)
1165
1166        def with_properties(self, properties: exp.Properties) -> str:
1167            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1168
1169        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1170            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1171                values_as_table = False
1172
1173            return super().values_sql(expression, values_as_table=values_as_table)
1174
1175        def datatype_sql(self, expression: exp.DataType) -> str:
1176            expressions = expression.expressions
1177            if (
1178                expressions
1179                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1180                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1181            ):
1182                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1183                return "OBJECT"
1184
1185            return super().datatype_sql(expression)
1186
1187        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1188            return self.func(
1189                "TO_NUMBER",
1190                expression.this,
1191                expression.args.get("format"),
1192                expression.args.get("precision"),
1193                expression.args.get("scale"),
1194            )
1195
1196        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
1197            milli = expression.args.get("milli")
1198            if milli is not None:
1199                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
1200                expression.set("nano", milli_to_nano)
1201
1202            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1203
1204        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1205            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1206                return self.func("TO_GEOGRAPHY", expression.this)
1207            if expression.is_type(exp.DataType.Type.GEOMETRY):
1208                return self.func("TO_GEOMETRY", expression.this)
1209
1210            return super().cast_sql(expression, safe_prefix=safe_prefix)
1211
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to a plain cast for non-string inputs."""
            value = expression.this

            # The operand may not have been type-annotated yet (e.g. a raw parse tree),
            # so annotate it lazily to decide whether TRY_CAST is legal here.
            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value, dialect=self.dialect)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1225
1226        def log_sql(self, expression: exp.Log) -> str:
1227            if not expression.expression:
1228                return self.func("LN", expression.this)
1229
1230            return super().log_sql(expression)
1231
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)) construct."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # The user-supplied alias column (if any) names FLATTEN's VALUE column.
            unnest_alias_columns = unnest_alias.columns if unnest_alias else []
            value = seq_get(unnest_alias_columns, 0) or exp.to_identifier("value")

            # Alias columns must line up positionally with the row FLATTEN produces:
            # seq, key, path, index, value, this.
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An explicit OFFSET alias takes the index column's slot.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                value,
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            # FLATTEN expects its argument as a named INPUT => parameter.
            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            # Outside of FROM/JOIN, render as a projection of the value column instead.
            value = "" if isinstance(expression.parent, (exp.From, exp.Join)) else f"{value} FROM "

            return f"{value}{explode}{alias}"
1263
1264        def show_sql(self, expression: exp.Show) -> str:
1265            terse = "TERSE " if expression.args.get("terse") else ""
1266            history = " HISTORY" if expression.args.get("history") else ""
1267            like = self.sql(expression, "like")
1268            like = f" LIKE {like}" if like else ""
1269
1270            scope = self.sql(expression, "scope")
1271            scope = f" {scope}" if scope else ""
1272
1273            scope_kind = self.sql(expression, "scope_kind")
1274            if scope_kind:
1275                scope_kind = f" IN {scope_kind}"
1276
1277            starts_with = self.sql(expression, "starts_with")
1278            if starts_with:
1279                starts_with = f" STARTS WITH {starts_with}"
1280
1281            limit = self.sql(expression, "limit")
1282
1283            from_ = self.sql(expression, "from")
1284            if from_:
1285                from_ = f" FROM {from_}"
1286
1287            privileges = self.expressions(expression, key="privileges", flat=True)
1288            privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""
1289
1290            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"
1291
1292        def describe_sql(self, expression: exp.Describe) -> str:
1293            # Default to table if kind is unknown
1294            kind_value = expression.args.get("kind") or "TABLE"
1295            kind = f" {kind_value}" if kind_value else ""
1296            this = f" {self.sql(expression, 'this')}"
1297            expressions = self.expressions(expression, flat=True)
1298            expressions = f" {expressions}" if expressions else ""
1299            return f"DESCRIBE{kind}{this}{expressions}"
1300
1301        def generatedasidentitycolumnconstraint_sql(
1302            self, expression: exp.GeneratedAsIdentityColumnConstraint
1303        ) -> str:
1304            start = expression.args.get("start")
1305            start = f" START {start}" if start else ""
1306            increment = expression.args.get("increment")
1307            increment = f" INCREMENT {increment}" if increment else ""
1308            return f"AUTOINCREMENT{start}{increment}"
1309
1310        def cluster_sql(self, expression: exp.Cluster) -> str:
1311            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1312
1313        def struct_sql(self, expression: exp.Struct) -> str:
1314            keys = []
1315            values = []
1316
1317            for i, e in enumerate(expression.expressions):
1318                if isinstance(e, exp.PropertyEQ):
1319                    keys.append(
1320                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1321                    )
1322                    values.append(e.expression)
1323                else:
1324                    keys.append(exp.Literal.string(f"_{i}"))
1325                    values.append(e)
1326
1327            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1328
1329        @unsupported_args("weight", "accuracy")
1330        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1331            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1332
1333        def alterset_sql(self, expression: exp.AlterSet) -> str:
1334            exprs = self.expressions(expression, flat=True)
1335            exprs = f" {exprs}" if exprs else ""
1336            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1337            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1338            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1339            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1340            tag = self.expressions(expression, key="tag", flat=True)
1341            tag = f" TAG {tag}" if tag else ""
1342
1343            return f"SET{exprs}{file_format}{copy_options}{tag}"
1344
1345        def strtotime_sql(self, expression: exp.StrToTime):
1346            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1347            return self.func(
1348                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1349            )
1350
1351        def timestampsub_sql(self, expression: exp.TimestampSub):
1352            return self.sql(
1353                exp.TimestampAdd(
1354                    this=expression.this,
1355                    expression=expression.expression * -1,
1356                    unit=expression.unit,
1357                )
1358            )
1359
1360        def jsonextract_sql(self, expression: exp.JSONExtract):
1361            this = expression.this
1362
1363            # JSON strings are valid coming from other dialects such as BQ
1364            return self.func(
1365                "GET_PATH",
1366                exp.ParseJSON(this=this) if this.is_string else this,
1367                expression.expression,
1368            )
1369
1370        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1371            this = expression.this
1372            if not isinstance(this, exp.TsOrDsToTimestamp):
1373                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1374
1375            return self.func("TO_CHAR", this, self.format_time(expression))
1376
1377        def datesub_sql(self, expression: exp.DateSub) -> str:
1378            value = expression.expression
1379            if value:
1380                value.replace(value * (-1))
1381            else:
1382                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")
1383
1384            return date_delta_sql("DATEADD")(self, expression)
1385
1386        def select_sql(self, expression: exp.Select) -> str:
1387            limit = expression.args.get("limit")
1388            offset = expression.args.get("offset")
1389            if offset and not limit:
1390                expression.limit(exp.Null(), copy=False)
1391            return super().select_sql(expression)
1392
        def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
            """Render the creatable part of CREATE, special-casing materialized views."""
            is_materialized = expression.find(exp.MaterializedProperty)
            copy_grants_property = expression.find(exp.CopyGrantsProperty)

            if expression.kind == "VIEW" and is_materialized and copy_grants_property:
                # For materialized views, COPY GRANTS is located *before* the columns list
                # This is in contrast to normal views where COPY GRANTS is located *after* the columns list
                # We default CopyGrantsProperty to POST_SCHEMA which means we need to output it POST_NAME if a materialized view is detected
                # ref: https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view#syntax
                # ref: https://docs.snowflake.com/en/sql-reference/sql/create-view#syntax
                post_schema_properties = locations[exp.Properties.Location.POST_SCHEMA]
                # Remove COPY GRANTS from its default slot so it isn't rendered twice.
                post_schema_properties.pop(post_schema_properties.index(copy_grants_property))

                this_name = self.sql(expression.this, "this")
                copy_grants = self.sql(copy_grants_property)
                this_schema = self.schema_columns_sql(expression.this)
                this_schema = f"{self.sep()}{this_schema}" if this_schema else ""

                # Emit: <name> COPY GRANTS (<columns>)
                return f"{this_name}{self.sep()}{copy_grants}{this_schema}"

            return super().createable_sql(expression, locations)
1414
1415        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
1416            this = expression.this
1417
1418            # If an ORDER BY clause is present, we need to remove it from ARRAY_AGG
1419            # and add it later as part of the WITHIN GROUP clause
1420            order = this if isinstance(this, exp.Order) else None
1421            if order:
1422                expression.set("this", order.this.pop())
1423
1424            expr_sql = super().arrayagg_sql(expression)
1425
1426            if order:
1427                expr_sql = self.sql(exp.WithinGroup(this=expr_sql, expression=order))
1428
1429            return expr_sql
1430
        def array_sql(self, expression: exp.Array) -> str:
            """Render array literals, rewriting ARRAY(SELECT AS STRUCT ...) subqueries."""
            expressions = expression.expressions

            first_expr = seq_get(expressions, 0)
            if isinstance(first_expr, exp.Select):
                # SELECT AS STRUCT foo AS alias_foo -> ARRAY_AGG(OBJECT_CONSTRUCT('alias_foo', foo))
                if first_expr.text("kind").upper() == "STRUCT":
                    object_construct_args = []
                    for expr in first_expr.expressions:
                        # Alias case: SELECT AS STRUCT foo AS alias_foo -> OBJECT_CONSTRUCT('alias_foo', foo)
                        # Column case: SELECT AS STRUCT foo -> OBJECT_CONSTRUCT('foo', foo)
                        name = expr.this if isinstance(expr, exp.Alias) else expr

                        object_construct_args.extend([exp.Literal.string(expr.alias_or_name), name])

                    array_agg = exp.ArrayAgg(
                        this=_build_object_construct(args=object_construct_args)
                    )

                    # Replace the SELECT's projections with the single aggregate and
                    # drop the STRUCT kind so it renders as a plain subquery.
                    first_expr.set("kind", None)
                    first_expr.set("expressions", [array_agg])

                    return self.sql(first_expr.subquery())

            return inline_array_sql(self, expression)
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by commas or by whitespace.

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

DATE_PART_MAPPING = {'Y': 'YEAR', 'YY': 'YEAR', 'YYY': 'YEAR', 'YYYY': 'YEAR', 'YR': 'YEAR', 'YEARS': 'YEAR', 'YRS': 'YEAR', 'MM': 'MONTH', 'MON': 'MONTH', 'MONS': 'MONTH', 'MONTHS': 'MONTH', 'D': 'DAY', 'DD': 'DAY', 'DAYS': 'DAY', 'DAYOFMONTH': 'DAY', 'DAY OF WEEK': 'DAYOFWEEK', 'WEEKDAY': 'DAYOFWEEK', 'DOW': 'DAYOFWEEK', 'DW': 'DAYOFWEEK', 'WEEKDAY_ISO': 'DAYOFWEEKISO', 'DOW_ISO': 'DAYOFWEEKISO', 'DW_ISO': 'DAYOFWEEKISO', 'DAY OF YEAR': 'DAYOFYEAR', 'DOY': 'DAYOFYEAR', 'DY': 'DAYOFYEAR', 'W': 'WEEK', 'WK': 'WEEK', 'WEEKOFYEAR': 'WEEK', 'WOY': 'WEEK', 'WY': 'WEEK', 'WEEK_ISO': 'WEEKISO', 'WEEKOFYEARISO': 'WEEKISO', 'WEEKOFYEAR_ISO': 'WEEKISO', 'Q': 'QUARTER', 'QTR': 'QUARTER', 'QTRS': 'QUARTER', 'QUARTERS': 'QUARTER', 'H': 'HOUR', 'HH': 'HOUR', 'HR': 'HOUR', 'HOURS': 'HOUR', 'HRS': 'HOUR', 'M': 'MINUTE', 'MI': 'MINUTE', 'MIN': 'MINUTE', 'MINUTES': 'MINUTE', 'MINS': 'MINUTE', 'S': 'SECOND', 'SEC': 'SECOND', 'SECONDS': 'SECOND', 'SECS': 'SECOND', 'MS': 'MILLISECOND', 'MSEC': 'MILLISECOND', 'MSECS': 'MILLISECOND', 'MSECOND': 'MILLISECOND', 'MSECONDS': 'MILLISECOND', 'MILLISEC': 'MILLISECOND', 'MILLISECS': 'MILLISECOND', 'MILLISECON': 'MILLISECOND', 'MILLISECONDS': 'MILLISECOND', 'US': 'MICROSECOND', 'USEC': 'MICROSECOND', 'USECS': 'MICROSECOND', 'MICROSEC': 'MICROSECOND', 'MICROSECS': 'MICROSECOND', 'USECOND': 'MICROSECOND', 'USECONDS': 'MICROSECOND', 'MICROSECONDS': 'MICROSECOND', 'NS': 'NANOSECOND', 'NSEC': 'NANOSECOND', 'NANOSEC': 'NANOSECOND', 'NSECOND': 'NANOSECOND', 'NSECONDS': 'NANOSECOND', 'NANOSECS': 'NANOSECOND', 'EPOCH_SECOND': 'EPOCH', 'EPOCH_SECONDS': 'EPOCH', 'EPOCH_MILLISECONDS': 'EPOCH_MILLISECOND', 'EPOCH_MICROSECONDS': 'EPOCH_MICROSECOND', 'EPOCH_NANOSECONDS': 'EPOCH_NANOSECOND', 'TZH': 'TIMEZONE_HOUR', 'TZM': 'TIMEZONE_MINUTE', 'DEC': 'DECADE', 'DECS': 'DECADE', 'DECADES': 'DECADE', 'MIL': 'MILLENIUM', 'MILS': 'MILLENIUM', 'MILLENIA': 'MILLENIUM', 'C': 'CENTURY', 'CENT': 'CENTURY', 'CENTS': 'CENTURY', 'CENTURIES': 'CENTURY', 'ISOWEEK': 'WEEKISO'}
def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
381    def quote_identifier(self, expression: E, identify: bool = True) -> E:
382        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
383        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
384        if (
385            isinstance(expression, exp.Identifier)
386            and isinstance(expression.parent, exp.Table)
387            and expression.name.lower() == "dual"
388        ):
389            return expression  # type: ignore
390
391        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\n`) to its unescaped version (the actual newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'Snowflake.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {'6': {0: True}}}, 'f': {'f': {'6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {'6': {0: True}}}, 'f': {'f': {'6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.JSONPathTokenizer(sqlglot.jsonpath.JSONPathTokenizer):
393    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
394        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
395        SINGLE_TOKENS.pop("$")
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '-': <TokenType.DASH: 'DASH'>, '.': <TokenType.DOT: 'DOT'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '"': <TokenType.QUOTE: 'QUOTE'>, '*': <TokenType.STAR: 'STAR'>}
class Parser(parser.Parser):
    """Snowflake-specific parser overrides."""

    IDENTIFY_PIVOT_STRINGS = True
    DEFAULT_SAMPLING_METHOD = "BERNOULLI"
    # `col:path` performs VARIANT extraction in Snowflake (not a slice)
    COLON_IS_VARIANT_EXTRACT = True

    # MATCH_CONDITION may appear where identifiers/variables are expected...
    ID_VAR_TOKENS = {
        *parser.Parser.ID_VAR_TOKENS,
        TokenType.MATCH_CONDITION,
    }

    # ...but not as a table alias; WINDOW, however, is a valid alias here
    TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
    TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

    COLON_PLACEHOLDER_TOKENS = ID_VAR_TOKENS | {TokenType.NUMBER}
# Maps Snowflake function names to builders that produce sqlglot expressions
FUNCTIONS = {
    **parser.Parser.FUNCTIONS,
    "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
    "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
    # Note: Snowflake's argument order is (array, value); ArrayContains is (value, array)
    "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
        this=seq_get(args, 1), expression=seq_get(args, 0)
    ),
    "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
        # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
        start=seq_get(args, 0),
        end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
        step=seq_get(args, 2),
    ),
    "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
    "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
    "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
    "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
    "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
    "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
    "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
    "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
    "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
    "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
    "DATE_TRUNC": _date_trunc_to_time,
    "DATEADD": _build_date_time_add(exp.DateAdd),
    "DATEDIFF": _build_datediff,
    "DIV0": _build_if_from_div0,
    "EDITDISTANCE": lambda args: exp.Levenshtein(
        this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
    ),
    "FLATTEN": exp.Explode.from_arg_list,
    "GET_PATH": lambda args, dialect: exp.JSONExtract(
        this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
    ),
    "HEX_DECODE_BINARY": exp.Unhex.from_arg_list,
    "IFF": exp.If.from_arg_list,
    "LAST_DAY": lambda args: exp.LastDay(
        this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
    ),
    "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
    "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
    "NULLIFZERO": _build_if_from_nullifzero,
    "OBJECT_CONSTRUCT": _build_object_construct,
    "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
    "REGEXP_REPLACE": _build_regexp_replace,
    "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
    "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
    "RLIKE": exp.RegexpLike.from_arg_list,
    "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
    "TIMEADD": _build_date_time_add(exp.TimeAdd),
    "TIMEDIFF": _build_datediff,
    "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
    "TIMESTAMPDIFF": _build_datediff,
    "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
    "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
    "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
    "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
    "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
    "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
    "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
    "TRY_TO_TIMESTAMP": _build_datetime(
        "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
    ),
    "TO_CHAR": build_timetostr_or_tochar,
    "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
    "TO_NUMBER": lambda args: exp.ToNumber(
        this=seq_get(args, 0),
        format=seq_get(args, 1),
        precision=seq_get(args, 2),
        scale=seq_get(args, 3),
    ),
    "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
    "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
    "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
    "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
    "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
    "TO_VARCHAR": exp.ToChar.from_arg_list,
    "ZEROIFNULL": _build_if_from_zeroifnull,
}
492
FUNCTION_PARSERS = {
    **parser.Parser.FUNCTION_PARSERS,
    "DATE_PART": lambda self: self._parse_date_part(),
    "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
    "LISTAGG": lambda self: self._parse_string_agg(),
}
# Drop the base parser's special TRIM handling
FUNCTION_PARSERS.pop("TRIM")

# TIME is not a timestamp-producing token in Snowflake
TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

RANGE_PARSERS = {
    **parser.Parser.RANGE_PARSERS,
    TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
    TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
}

ALTER_PARSERS = {
    **parser.Parser.ALTER_PARSERS,
    # ALTER ... UNSET [TAG] <name>, ...
    "UNSET": lambda self: self.expression(
        exp.Set,
        tag=self._match_text_seq("TAG"),
        expressions=self._parse_csv(self._parse_id_var),
        unset=True,
    ),
}

STATEMENT_PARSERS = {
    **parser.Parser.STATEMENT_PARSERS,
    TokenType.GET: lambda self: self._parse_get(),
    TokenType.PUT: lambda self: self._parse_put(),
    TokenType.SHOW: lambda self: self._parse_show(),
}

PROPERTY_PARSERS = {
    **parser.Parser.PROPERTY_PARSERS,
    "CREDENTIALS": lambda self: self._parse_credentials_property(),
    "FILE_FORMAT": lambda self: self._parse_file_format_property(),
    "LOCATION": lambda self: self._parse_location_property(),
    "TAG": lambda self: self._parse_tag(),
    "USING": lambda self: self._match_text_seq("TEMPLATE")
    and self.expression(exp.UsingTemplateProperty, this=self._parse_statement()),
}

TYPE_CONVERTERS = {
    # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
    exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
}
540
# SHOW <kind> commands supported by _parse_show_snowflake; TERSE variants map
# to the same underlying kind
SHOW_PARSERS = {
    "DATABASES": _show_parser("DATABASES"),
    "TERSE DATABASES": _show_parser("DATABASES"),
    "SCHEMAS": _show_parser("SCHEMAS"),
    "TERSE SCHEMAS": _show_parser("SCHEMAS"),
    "OBJECTS": _show_parser("OBJECTS"),
    "TERSE OBJECTS": _show_parser("OBJECTS"),
    "TABLES": _show_parser("TABLES"),
    "TERSE TABLES": _show_parser("TABLES"),
    "VIEWS": _show_parser("VIEWS"),
    "TERSE VIEWS": _show_parser("VIEWS"),
    "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
    "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
    "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
    "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
    "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
    "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
    "SEQUENCES": _show_parser("SEQUENCES"),
    "TERSE SEQUENCES": _show_parser("SEQUENCES"),
    "STAGES": _show_parser("STAGES"),
    "COLUMNS": _show_parser("COLUMNS"),
    "USERS": _show_parser("USERS"),
    "TERSE USERS": _show_parser("USERS"),
    "FILE FORMATS": _show_parser("FILE FORMATS"),
    "FUNCTIONS": _show_parser("FUNCTIONS"),
    "PROCEDURES": _show_parser("PROCEDURES"),
    "WAREHOUSES": _show_parser("WAREHOUSES"),
}

CONSTRAINT_PARSERS = {
    **parser.Parser.CONSTRAINT_PARSERS,
    "WITH": lambda self: self._parse_with_constraint(),
    "MASKING": lambda self: self._parse_with_constraint(),
    "PROJECTION": lambda self: self._parse_with_constraint(),
    "TAG": lambda self: self._parse_with_constraint(),
}

# Single-character tokens that may appear inside a staged file path
STAGED_FILE_SINGLE_TOKENS = {
    TokenType.DOT,
    TokenType.MOD,
    TokenType.SLASH,
}

# Column names produced by LATERAL FLATTEN when no explicit alias columns are given
FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

# SHOW kinds whose unqualified scope is a schema rather than a table
SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

# CREATE kinds whose target is a bare identifier, not a table reference
NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

LAMBDAS = {
    **parser.Parser.LAMBDAS,
    TokenType.ARROW: lambda self, expressions: self.expression(
        exp.Lambda,
        this=self._replace_lambda(
            self._parse_assignment(),
            expressions,
        ),
        # Typed lambda parameters arrive as Casts; keep only the bare names
        expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
    ),
}
601
def _parse_use(self) -> exp.Use:
    """Parse USE, including Snowflake's `USE SECONDARY ROLES ALL | NONE | <role>, ...`."""
    if not self._match_text_seq("SECONDARY", "ROLES"):
        return super()._parse_use()

    kind = self._match_texts(("ALL", "NONE")) and exp.var(self._prev.text.upper())
    roles = None if kind else self._parse_csv(lambda: self._parse_table(schema=False))
    return self.expression(exp.Use, kind="SECONDARY ROLES", this=kind, expressions=roles)
611
def _negate_range(
    self, this: t.Optional[exp.Expression] = None
) -> t.Optional[exp.Expression]:
    """Negate a parsed range expression, special-casing IN with a subquery."""
    if not this:
        return this

    query = this.args.get("query")
    if isinstance(this, exp.In) and isinstance(query, exp.Query):
        # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
        # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
        # which can produce different results (most likely a Snowflake bug).
        #
        # https://docs.snowflake.com/en/sql-reference/functions/in
        # Context: https://github.com/tobymao/sqlglot/issues/3890
        return self.expression(
            exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
        )

    return self.expression(exp.Not, this=this)
631
def _parse_tag(self) -> exp.Tags:
    """Parse `TAG (<name> = <value>, ...)` into a Tags node."""
    tag_properties = self._parse_wrapped_csv(self._parse_property)
    return self.expression(exp.Tags, expressions=tag_properties)
637
def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
    """Parse `[WITH] MASKING POLICY | PROJECTION POLICY | TAG` column constraints."""
    # WITH is optional; if we arrived here on MASKING/PROJECTION/TAG directly,
    # step back one token so the matching below can consume it
    if self._prev.token_type != TokenType.WITH:
        self._retreat(self._index - 1)

    if self._match_text_seq("MASKING", "POLICY"):
        policy = self._parse_column()
        return self.expression(
            exp.MaskingPolicyColumnConstraint,
            # Normalize a qualified policy name from a Column into a Dot chain
            this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
            expressions=self._match(TokenType.USING)
            and self._parse_wrapped_csv(self._parse_id_var),
        )
    if self._match_text_seq("PROJECTION", "POLICY"):
        policy = self._parse_column()
        return self.expression(
            exp.ProjectionPolicyColumnConstraint,
            this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
        )
    if self._match(TokenType.TAG):
        return self._parse_tag()

    return None
660
def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
    """Parse a WITH property, handling Snowflake's `WITH TAG (...)` form."""
    return self._parse_tag() if self._match(TokenType.TAG) else super()._parse_with_property()
666
def _parse_create(self) -> exp.Create | exp.Command:
    """Parse CREATE, unwrapping the target for non-table creatables."""
    create = super()._parse_create()

    # Kinds like WAREHOUSE/TAG name a bare identifier, so replace the parsed
    # Table node with the Identifier it encloses
    if isinstance(create, exp.Create) and create.kind in self.NON_TABLE_CREATABLES:
        create.this.replace(create.this.this)

    return create
674
# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
    """Parse DATE_PART(<part>, <expr>), mapping EPOCH* parts to TimeToUnix."""
    this = self._parse_var() or self._parse_type()

    if not this:
        return None

    self._match(TokenType.COMMA)
    expression = self._parse_bitwise()
    this = map_date_part(this)
    name = this.name.upper()

    if name.startswith("EPOCH"):
        # EPOCH parts become a unix timestamp, multiplied up to the requested precision
        if name == "EPOCH_MILLISECOND":
            scale = 10**3
        elif name == "EPOCH_MICROSECOND":
            scale = 10**6
        elif name == "EPOCH_NANOSECOND":
            scale = 10**9
        else:
            scale = None  # plain EPOCH / EPOCH_SECOND: no scaling

        ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
        to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

        if scale:
            to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

        return to_unix

    return self.expression(exp.Extract, this=this, expression=expression)
707
def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
    """Parse one key-value entry inside a bracketed construct."""
    if not is_map:
        return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))

    # Keys are strings in Snowflake's objects, see also:
    # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
    # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
    return self._parse_slice(self._parse_string())
716
def _parse_lateral(self) -> t.Optional[exp.Lateral]:
    """Parse LATERAL, filling in default FLATTEN output columns when needed."""
    lateral = super()._parse_lateral()
    if not lateral:
        return lateral

    if isinstance(lateral.this, exp.Explode):
        table_alias = lateral.args.get("alias")
        # Columns produced by FLATTEN (see FLATTEN_COLUMNS)
        columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
        if table_alias and not table_alias.args.get("columns"):
            table_alias.set("columns", columns)
        elif not table_alias:
            # No alias at all: synthesize one so the columns are addressable
            exp.alias_(lateral, "_flattened", table=columns, copy=False)

    return lateral
731
def _parse_table_parts(
    self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
) -> exp.Table:
    """Parse a table reference, including staged-file locations.

    https://docs.snowflake.com/en/user-guide/querying-stage
    """
    if self._match(TokenType.STRING, advance=False):
        table = self._parse_string()
    elif self._match_text_seq("@", advance=False):
        table = self._parse_location_path()
    else:
        table = None

    if table:
        file_format = None
        pattern = None

        # Staged files may carry optional (FILE_FORMAT => ..., PATTERN => ...) options
        wrapped = self._match(TokenType.L_PAREN)
        while self._curr and wrapped and not self._match(TokenType.R_PAREN):
            if self._match_text_seq("FILE_FORMAT", "=>"):
                # FILE_FORMAT value is either a string or a (possibly qualified) name
                file_format = self._parse_string() or super()._parse_table_parts(
                    is_db_reference=is_db_reference
                )
            elif self._match_text_seq("PATTERN", "=>"):
                pattern = self._parse_string()
            else:
                break

            self._match(TokenType.COMMA)

        table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
    else:
        table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

    return table
765
def _parse_table(
    self,
    schema: bool = False,
    joins: bool = False,
    alias_tokens: t.Optional[t.Collection[TokenType]] = None,
    parse_bracket: bool = False,
    is_db_reference: bool = False,
    parse_partition: bool = False,
) -> t.Optional[exp.Expression]:
    """Parse a table source, hoisting TABLE(...) into a TableFromRows node."""
    table = super()._parse_table(
        schema=schema,
        joins=joins,
        alias_tokens=alias_tokens,
        parse_bracket=parse_bracket,
        is_db_reference=is_db_reference,
        parse_partition=parse_partition,
    )
    if isinstance(table, exp.Table) and isinstance(table.this, exp.TableFromRows):
        # Copy the wrapping Table's remaining args (alias etc.) onto the
        # TableFromRows node and use it directly
        table_from_rows = table.this
        for arg in exp.TableFromRows.arg_types:
            if arg != "this":
                table_from_rows.set(arg, table.args.get(arg))

        table = table_from_rows

    return table
792
def _parse_id_var(
    self,
    any_token: bool = True,
    tokens: t.Optional[t.Collection[TokenType]] = None,
) -> t.Optional[exp.Expression]:
    """Parse an identifier, preserving IDENTIFIER(...) as an anonymous call."""
    if self._match_text_seq("IDENTIFIER", "("):
        # The argument is either an identifier or a string literal
        identifier = (
            super()._parse_id_var(any_token=any_token, tokens=tokens)
            or self._parse_string()
        )
        self._match_r_paren()
        return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

    return super()._parse_id_var(any_token=any_token, tokens=tokens)
807
def _parse_show_snowflake(self, this: str) -> exp.Show:
    """Parse the remainder of a SHOW <kind> command into a Show expression."""
    scope = None
    scope_kind = None

    # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
    # which is syntactically valid but has no effect on the output
    terse = self._tokens[self._index - 2].text.upper() == "TERSE"

    history = self._match_text_seq("HISTORY")

    like = self._parse_string() if self._match(TokenType.LIKE) else None

    if self._match(TokenType.IN):
        if self._match_text_seq("ACCOUNT"):
            scope_kind = "ACCOUNT"
        elif self._match_text_seq("CLASS"):
            scope_kind = "CLASS"
            scope = self._parse_table_parts()
        elif self._match_text_seq("APPLICATION"):
            scope_kind = "APPLICATION"
            if self._match_text_seq("PACKAGE"):
                scope_kind += " PACKAGE"
            scope = self._parse_table_parts()
        elif self._match_set(self.DB_CREATABLES):
            scope_kind = self._prev.text.upper()
            if self._curr:
                scope = self._parse_table_parts()
        elif self._curr:
            # No explicit scope keyword: infer schema vs table from the SHOW kind
            scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
            scope = self._parse_table_parts()

    return self.expression(
        exp.Show,
        **{
            "terse": terse,
            "this": this,
            "history": history,
            "like": like,
            "scope": scope,
            "scope_kind": scope_kind,
            "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
            "limit": self._parse_limit(),
            "from": self._parse_string() if self._match(TokenType.FROM) else None,
            "privileges": self._match_text_seq("WITH", "PRIVILEGES")
            and self._parse_csv(lambda: self._parse_var(any_token=True, upper=True)),
        },
    )
855
def _parse_put(self) -> exp.Put | exp.Command:
    """Parse PUT; fall back to a raw Command when the source path is unquoted."""
    if self._curr.token_type != TokenType.STRING:
        return self._parse_as_command(self._prev)

    return self.expression(
        exp.Put,
        this=self._parse_string(),
        target=self._parse_location_path(),
        properties=self._parse_properties(),
    )
866
def _parse_get(self) -> exp.Get | exp.Command:
    """Parse GET; fall back to a raw Command when the file path is unquoted."""
    start = self._prev
    target = self._parse_location_path()

    # Parse as command if unquoted file path
    if self._curr.token_type == TokenType.URI_START:
        return self._parse_as_command(start)

    return self.expression(
        exp.Get,
        this=self._parse_string(),
        target=target,
        properties=self._parse_properties(),
    )
881
def _parse_location_property(self) -> exp.LocationProperty:
    """Parse `LOCATION [=] <stage path>`."""
    self._match(TokenType.EQ)  # the "=" is optional
    path = self._parse_location_path()
    return self.expression(exp.LocationProperty, this=path)
885
def _parse_file_location(self) -> t.Optional[exp.Expression]:
    """Parse a file location: a parenthesized subquery or a staged file reference."""
    if self._match(TokenType.L_PAREN, advance=False):
        return self._parse_select(table=True, parse_subquery_alias=False)

    return self._parse_table_parts()
893
def _parse_location_path(self) -> exp.Var:
    """Consume a stage/file path as raw text and return it as a Var."""
    start = self._curr
    self._advance_any(ignore_reserved=True)

    # We avoid consuming a comma token because external tables like @foo and @bar
    # can be joined in a query with a comma separator, as well as closing paren
    # in case of subqueries
    while self._is_connected() and not self._match_set(
        (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
    ):
        self._advance_any(ignore_reserved=True)

    return exp.var(self._find_sql(start, self._prev))
907
def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
    """Parse a lambda parameter; a trailing type annotation becomes a Cast."""
    arg = super()._parse_lambda_arg()

    if not arg:
        return arg

    arg_type = self._parse_types()
    return self.expression(exp.Cast, this=arg, to=arg_type) if arg_type else arg
920
def _parse_foreign_key(self) -> exp.ForeignKey:
    """Parse a foreign key; an inline FK jumps straight to REFERENCES with implied columns."""
    inline = self._match(TokenType.REFERENCES, advance=False)
    # Out-of-line FKs name their columns explicitly, so defer to the base parser
    return self.expression(exp.ForeignKey) if inline else super()._parse_foreign_key()
928
def _parse_file_format_property(self) -> exp.FileFormatProperty:
    """Parse `FILE_FORMAT [=] (<options>) | <format name>`."""
    self._match(TokenType.EQ)  # the "=" is optional

    # Either a wrapped option list, e.g. (TYPE = CSV ...), or a bare format name
    if self._match(TokenType.L_PAREN, advance=False):
        options = self._parse_wrapped_options()
    else:
        options = [self._parse_format_name()]

    return self.expression(exp.FileFormatProperty, expressions=options)
940
def _parse_credentials_property(self) -> exp.CredentialsProperty:
    """Parse `CREDENTIALS = (<key> = <value> ...)`."""
    options = self._parse_wrapped_options()
    return self.expression(exp.CredentialsProperty, expressions=options)

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.SETTINGS: 'SETTINGS'>, <TokenType.UINT: 'UINT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.END: 'END'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.INT256: 'INT256'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.ANTI: 'ANTI'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.INT128: 'INT128'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.KILL: 'KILL'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.GET: 'GET'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.ASC: 'ASC'>, <TokenType.TAG: 'TAG'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.INT: 'INT'>, <TokenType.SINK: 'SINK'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.ASOF: 'ASOF'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ROW: 'ROW'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.ALL: 'ALL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.VOID: 'VOID'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.BLOB: 'BLOB'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.SET: 'SET'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.OFFSET: 
'OFFSET'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CHAR: 'CHAR'>, <TokenType.TOP: 'TOP'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.IPV4: 'IPV4'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.DELETE: 'DELETE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.MERGE: 'MERGE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.LIST: 'LIST'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UUID: 'UUID'>, <TokenType.LOAD: 'LOAD'>, <TokenType.RANGE: 'RANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.ANY: 'ANY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INDEX: 'INDEX'>, <TokenType.MAP: 'MAP'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ROWS: 'ROWS'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DESC: 'DESC'>, <TokenType.LEFT: 'LEFT'>, <TokenType.SHOW: 
'SHOW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.IS: 'IS'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.NESTED: 'NESTED'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.BIT: 'BIT'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.VAR: 'VAR'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATE32: 'DATE32'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.RING: 'RING'>, <TokenType.TEXT: 'TEXT'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TABLE: 'TABLE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.NAME: 'NAME'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.XML: 'XML'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.INET: 'INET'>, <TokenType.NULL: 'NULL'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.USE: 'USE'>, <TokenType.DATE: 'DATE'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.SOME: 'SOME'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TRUE: 'TRUE'>, 
<TokenType.FULL: 'FULL'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.POINT: 'POINT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TIME: 'TIME'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UINT128: 'UINT128'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.PUT: 'PUT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.CASE: 'CASE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.JSON: 'JSON'>, <TokenType.MODEL: 'MODEL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DETACH: 'DETACH'>, <TokenType.KEEP: 'KEEP'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.APPLY: 'APPLY'>, <TokenType.COPY: 'COPY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CACHE: 'CACHE'>, <TokenType.SCHEMA: 'SCHEMA'>}
TABLE_ALIAS_TOKENS = {<TokenType.SETTINGS: 'SETTINGS'>, <TokenType.UINT: 'UINT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.END: 'END'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.INT256: 'INT256'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.ANTI: 'ANTI'>, <TokenType.INT128: 'INT128'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.KILL: 'KILL'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.GET: 'GET'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.ASC: 'ASC'>, <TokenType.TAG: 'TAG'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.INT: 'INT'>, <TokenType.SINK: 'SINK'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ROW: 'ROW'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.ALL: 'ALL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.VOID: 'VOID'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.BLOB: 'BLOB'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.SET: 'SET'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CHAR: 'CHAR'>, 
<TokenType.TOP: 'TOP'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.IPV4: 'IPV4'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.DELETE: 'DELETE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.MERGE: 'MERGE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.LIST: 'LIST'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UUID: 'UUID'>, <TokenType.LOAD: 'LOAD'>, <TokenType.RANGE: 'RANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.ANY: 'ANY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INDEX: 'INDEX'>, <TokenType.MAP: 'MAP'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ROWS: 'ROWS'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DESC: 'DESC'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.IS: 'IS'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.OBJECT: 
'OBJECT'>, <TokenType.NESTED: 'NESTED'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.BIT: 'BIT'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.VAR: 'VAR'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATE32: 'DATE32'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.RING: 'RING'>, <TokenType.TEXT: 'TEXT'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TABLE: 'TABLE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.NAME: 'NAME'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.XML: 'XML'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.INET: 'INET'>, <TokenType.NULL: 'NULL'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.USE: 'USE'>, <TokenType.DATE: 'DATE'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.SOME: 'SOME'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.POINT: 'POINT'>, <TokenType.FINAL: 'FINAL'>, 
<TokenType.ESCAPE: 'ESCAPE'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TIME: 'TIME'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UINT128: 'UINT128'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.PUT: 'PUT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.CASE: 'CASE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.JSON: 'JSON'>, <TokenType.MODEL: 'MODEL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DETACH: 'DETACH'>, <TokenType.KEEP: 'KEEP'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.COPY: 'COPY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CACHE: 'CACHE'>, <TokenType.SCHEMA: 'SCHEMA'>}
COLON_PLACEHOLDER_TOKENS = {<TokenType.SETTINGS: 'SETTINGS'>, <TokenType.UINT: 'UINT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.END: 'END'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.INT256: 'INT256'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.ANTI: 'ANTI'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.INT128: 'INT128'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.KILL: 'KILL'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.GET: 'GET'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.ASC: 'ASC'>, <TokenType.TAG: 'TAG'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.INT: 'INT'>, <TokenType.SINK: 'SINK'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.ASOF: 'ASOF'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ROW: 'ROW'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.ALL: 'ALL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.VOID: 'VOID'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.BLOB: 'BLOB'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.SET: 'SET'>, <TokenType.TINYBLOB: 'TINYBLOB'>, 
<TokenType.OFFSET: 'OFFSET'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CHAR: 'CHAR'>, <TokenType.TOP: 'TOP'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DELETE: 'DELETE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.MERGE: 'MERGE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.LIST: 'LIST'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UUID: 'UUID'>, <TokenType.LOAD: 'LOAD'>, <TokenType.RANGE: 'RANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.ANY: 'ANY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INDEX: 'INDEX'>, <TokenType.MAP: 'MAP'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ROWS: 'ROWS'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DESC: 'DESC'>, <TokenType.LEFT: 'LEFT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UBIGINT: 
'UBIGINT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.IS: 'IS'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.NESTED: 'NESTED'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.BIT: 'BIT'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.VAR: 'VAR'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATE32: 'DATE32'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.RING: 'RING'>, <TokenType.TEXT: 'TEXT'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TABLE: 'TABLE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.NAME: 'NAME'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.XML: 'XML'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.INET: 'INET'>, <TokenType.NULL: 'NULL'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.USE: 'USE'>, <TokenType.DATE: 'DATE'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.SOME: 'SOME'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FULL: 'FULL'>, 
<TokenType.PARTITION: 'PARTITION'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.POINT: 'POINT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TIME: 'TIME'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UINT128: 'UINT128'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.PUT: 'PUT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.NUMBER: 'NUMBER'>, <TokenType.CASE: 'CASE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.JSON: 'JSON'>, <TokenType.MODEL: 'MODEL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DETACH: 'DETACH'>, <TokenType.KEEP: 'KEEP'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.APPLY: 'APPLY'>, <TokenType.COPY: 'COPY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CACHE: 'CACHE'>, <TokenType.SCHEMA: 'SCHEMA'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.And'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONCAT_AGG': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ArrayConcatAgg'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 
'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Contains'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CONVERT_TO_CHARSET': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConvertToCharset'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_SCHEMA': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.CurrentSchema'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATE_BIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateBin'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FEATURES_AT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FeaturesAtTime'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Int64'>>, 'IS_ASCII': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsAscii'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_B_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBObjectAgg'>>, 'J_S_O_N_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONCast'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractArray'>>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 
'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'CHAR_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'CHARACTER_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAKE_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MakeInterval'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Or'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 
'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <function build_timetostr_or_tochar>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_DATETIME': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDatetime'>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNICODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unicode'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'XMLELEMENT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLElement'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'STRPOS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'CHARINDEX': <function Parser.<lambda>>, 'INSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'LOCATE': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function _build_bitwise.<locals>._builder>, 'BIT_XOR': <function _build_bitwise.<locals>._builder>, 'BITOR': <function _build_bitwise.<locals>._builder>, 'BIT_OR': <function _build_bitwise.<locals>._builder>, 'BITSHIFTLEFT': <function _build_bitwise.<locals>._builder>, 'BIT_SHIFTLEFT': <function _build_bitwise.<locals>._builder>, 
'BITSHIFTRIGHT': <function _build_bitwise.<locals>._builder>, 'BIT_SHIFTRIGHT': <function _build_bitwise.<locals>._builder>, 'BOOLXOR': <function _build_bitwise.<locals>._builder>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'EDITDISTANCE': <function Snowflake.Parser.<lambda>>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'HEX_DECODE_BINARY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_SUBSTR_ALL': <function _build_regexp_extract.<locals>._builder>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TABLE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIME': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 
'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'ARG_MAX': <function Parser.<dictcomp>.<lambda>>, 'ARGMAX': <function Parser.<dictcomp>.<lambda>>, 'MAX_BY': <function Parser.<dictcomp>.<lambda>>, 'ARG_MIN': <function Parser.<dictcomp>.<lambda>>, 'ARGMIN': <function Parser.<dictcomp>.<lambda>>, 'MIN_BY': <function Parser.<dictcomp>.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CEIL': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'FLOOR': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'XMLELEMENT': <function Parser.<lambda>>, 'XMLTABLE': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>, 'LISTAGG': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.ANALYZE: 'ANALYZE'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UNPIVOT: 'UNPIVOT'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.GET: 'GET'>: <function Snowflake.Parser.<lambda>>, <TokenType.PUT: 'PUT'>: <function Snowflake.Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'ENVIRONMENT': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function 
Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Snowflake.Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'CREDENTIALS': <function Snowflake.Parser.<lambda>>, 'FILE_FORMAT': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'DATABASES': <function _show_parser.<locals>._parse>, 'TERSE DATABASES': <function _show_parser.<locals>._parse>, 'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'STAGES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>, 'FILE FORMATS': <function _show_parser.<locals>._parse>, 'FUNCTIONS': <function _show_parser.<locals>._parse>, 'PROCEDURES': <function _show_parser.<locals>._parse>, 'WAREHOUSES': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WATERMARK': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'BUCKET': <function Parser.<lambda>>, 'TRUNCATE': <function Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'OBJECTS', 'SEQUENCES', 'UNIQUE KEYS', 'TABLES', 'IMPORTED KEYS', 'VIEWS'}
NON_TABLE_CREATABLES = {'TAG', 'WAREHOUSE', 'STREAMLIT', 'STORAGE INTEGRATION'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'DATABASES': {0: True}, 'TERSE': {'DATABASES': {0: True}, 'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'STAGES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}, 'FILE': {'FORMATS': {0: True}}, 'FUNCTIONS': {0: True}, 'PROCEDURES': {0: True}, 'WAREHOUSES': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
WINDOW_EXCLUDE_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
ANALYZE_STYLES
ANALYZE_EXPRESSION_PARSERS
PARTITION_KEYWORDS
AMBIGUOUS_ALIAS_TOKENS
OPERATION_MODIFIERS
RECURSIVE_CTE_SEARCH_KIND
MODIFIABLES
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
OPTIONAL_ALIAS_TOKEN_CTE
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
parse_set_operation
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
947    class Tokenizer(tokens.Tokenizer):
           # Snowflake-specific lexer settings layered over the base tokenizer.
           # A quote inside a string literal is escaped by backslash or a doubled quote.
948        STRING_ESCAPES = ["\\", "'"]
949        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
           # $$ ... $$ delimits raw (dollar-quoted) strings in Snowflake.
950        RAW_STRINGS = ["$$"]
951        COMMENTS = ["--", "//", ("/*", "*/")]
952        NESTED_COMMENTS = False
953
           # Snowflake keywords added on top of (or overriding) the shared keyword map.
954        KEYWORDS = {
955            **tokens.Tokenizer.KEYWORDS,
956            "FILE://": TokenType.URI_START,
957            "BYTEINT": TokenType.INT,
958            "EXCLUDE": TokenType.EXCEPT,
959            "FILE FORMAT": TokenType.FILE_FORMAT,
960            "GET": TokenType.GET,
961            "ILIKE ANY": TokenType.ILIKE_ANY,
962            "LIKE ANY": TokenType.LIKE_ANY,
963            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
964            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
965            "MINUS": TokenType.EXCEPT,
966            "NCHAR VARYING": TokenType.VARCHAR,
967            "PUT": TokenType.PUT,
968            "REMOVE": TokenType.COMMAND,
969            "RM": TokenType.COMMAND,
970            "SAMPLE": TokenType.TABLE_SAMPLE,
971            "SQL_DOUBLE": TokenType.DOUBLE,
972            "SQL_VARCHAR": TokenType.VARCHAR,
973            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
974            "TAG": TokenType.TAG,
975            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
976            "TOP": TokenType.TOP,
977            "WAREHOUSE": TokenType.WAREHOUSE,
978            "STAGE": TokenType.STAGE,
979            "STREAMLIT": TokenType.STREAMLIT,
980        }
           # "/*+" (hint-comment opener) is not treated specially in Snowflake.
981        KEYWORDS.pop("/*+")
982
983        SINGLE_TOKENS = {
984            **tokens.Tokenizer.SINGLE_TOKENS,
985            "$": TokenType.PARAMETER,
986        }
987
988        VAR_SINGLE_TOKENS = {"$"}
989
           # SHOW has a real parser in this dialect, so it is not lexed as an opaque command.
990        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': 
<TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NAMESPACE': <TokenType.NAMESPACE: 'NAMESPACE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': 
<TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 
'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT256': <TokenType.INT256: 'INT256'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'UINT128': <TokenType.UINT128: 'UINT128'>, 'UINT256': <TokenType.UINT256: 'UINT256'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 
'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 
'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.ANALYZE: 'ANALYZE'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'FILE://': <TokenType.URI_START: 'URI_START'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'FILE FORMAT': <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, 'GET': <TokenType.GET: 'GET'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.PUT: 'PUT'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STAGE': <TokenType.STAGE: 'STAGE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>, <TokenType.RENAME: 'RENAME'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 992    class Generator(generator.Generator):
            # Capability flags steering the shared generator toward Snowflake syntax.
 993        PARAMETER_TOKEN = "$"
 994        MATCHED_BY_SOURCE = False
 995        SINGLE_STRING_INTERVAL = True
 996        JOIN_HINTS = False
 997        TABLE_HINTS = False
 998        QUERY_HINTS = False
 999        AGGREGATE_FILTER_SUPPORTED = False
1000        SUPPORTS_TABLE_COPY = False
1001        COLLATE_IS_FUNC = True
1002        LIMIT_ONLY_LITERALS = True
1003        JSON_KEY_VALUE_PAIR_SEP = ","
1004        INSERT_OVERWRITE = " OVERWRITE INTO"
1005        STRUCT_DELIMITER = ("(", ")")
1006        COPY_PARAMS_ARE_WRAPPED = False
1007        COPY_PARAMS_EQ_REQUIRED = True
1008        STAR_EXCEPT = "EXCLUDE"
1009        SUPPORTS_EXPLODING_PROJECTIONS = False
1010        ARRAY_CONCAT_IS_VAR_LEN = False
1011        SUPPORTS_CONVERT_TIMEZONE = True
1012        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
1013        SUPPORTS_MEDIAN = True
1014        ARRAY_SIZE_NAME = "ARRAY_SIZE"
1015
            # Per-expression SQL renderers; entries here override the base TRANSFORMS map.
1016        TRANSFORMS = {
1017            **generator.Generator.TRANSFORMS,
1018            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
1019            exp.ArgMax: rename_func("MAX_BY"),
1020            exp.ArgMin: rename_func("MIN_BY"),
1021            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
1022            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
1023            exp.AtTimeZone: lambda self, e: self.func(
1024                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
1025            ),
1026            exp.BitwiseOr: rename_func("BITOR"),
1027            exp.BitwiseXor: rename_func("BITXOR"),
1028            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
1029            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
1030            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
1031            exp.DateAdd: date_delta_sql("DATEADD"),
1032            exp.DateDiff: date_delta_sql("DATEDIFF"),
1033            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
1034            exp.DatetimeDiff: timestampdiff_sql,
1035            exp.DateStrToDate: datestrtodate_sql,
1036            exp.DayOfMonth: rename_func("DAYOFMONTH"),
1037            exp.DayOfWeek: rename_func("DAYOFWEEK"),
1038            exp.DayOfWeekIso: rename_func("DAYOFWEEKISO"),
1039            exp.DayOfYear: rename_func("DAYOFYEAR"),
1040            exp.Explode: rename_func("FLATTEN"),
1041            exp.Extract: lambda self, e: self.func(
1042                "DATE_PART", map_date_part(e.this, self.dialect), e.expression
1043            ),
1044            exp.FileFormatProperty: lambda self,
1045            e: f"FILE_FORMAT=({self.expressions(e, 'expressions', sep=' ')})",
1046            exp.FromTimeZone: lambda self, e: self.func(
1047                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
1048            ),
1049            exp.GenerateSeries: lambda self, e: self.func(
1050                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
1051            ),
1052            exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, sep=""),
1053            exp.If: if_sql(name="IFF", false_value="NULL"),
1054            exp.JSONExtractArray: _json_extract_value_array_sql,
1055            exp.JSONExtractScalar: lambda self, e: self.func(
1056                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
1057            ),
1058            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
1059            exp.JSONPathRoot: lambda *_: "",
1060            exp.JSONValueArray: _json_extract_value_array_sql,
1061            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
1062                rename_func("EDITDISTANCE")
1063            ),
1064            exp.LocationProperty: lambda self, e: f"LOCATION={self.sql(e, 'this')}",
1065            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
1066            exp.LogicalOr: rename_func("BOOLOR_AGG"),
1067            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
1068            exp.MakeInterval: no_make_interval_sql,
1069            exp.Max: max_or_greatest,
1070            exp.Min: min_or_least,
1071            exp.ParseJSON: lambda self, e: self.func(
1072                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
1073            ),
1074            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
1075            exp.PercentileCont: transforms.preprocess(
1076                [transforms.add_within_group_for_percentiles]
1077            ),
1078            exp.PercentileDisc: transforms.preprocess(
1079                [transforms.add_within_group_for_percentiles]
1080            ),
1081            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
1082            exp.RegexpExtract: _regexpextract_sql,
1083            exp.RegexpExtractAll: _regexpextract_sql,
1084            exp.RegexpILike: _regexpilike_sql,
1085            exp.Rand: rename_func("RANDOM"),
1086            exp.Select: transforms.preprocess(
1087                [
1088                    transforms.eliminate_window_clause,
1089                    transforms.eliminate_distinct_on,
1090                    transforms.explode_projection_to_unnest(),
1091                    transforms.eliminate_semi_and_anti_joins,
1092                    _transform_generate_date_array,
1093                ]
1094            ),
1095            exp.SHA: rename_func("SHA1"),
1096            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
1097            exp.StartsWith: rename_func("STARTSWITH"),
1098            exp.StrPosition: lambda self, e: strposition_sql(
1099                self, e, func_name="CHARINDEX", supports_position=True
1100            ),
1101            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
1102            exp.Stuff: rename_func("INSERT"),
1103            exp.TimeAdd: date_delta_sql("TIMEADD"),
1104            exp.Timestamp: no_timestamp_sql,
1105            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
1106            exp.TimestampDiff: lambda self, e: self.func(
1107                "TIMESTAMPDIFF", e.unit, e.expression, e.this
1108            ),
1109            exp.TimestampTrunc: timestamptrunc_sql(),
1110            exp.TimeStrToTime: timestrtotime_sql,
1111            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
1112            exp.ToArray: rename_func("TO_ARRAY"),
1113            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
1114            exp.ToDouble: rename_func("TO_DOUBLE"),
1115            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
1116            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
1117            exp.TsOrDsToDate: lambda self, e: self.func(
1118                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
1119            ),
1120            exp.TsOrDsToTime: lambda self, e: self.func(
1121                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
1122            ),
1123            exp.Unhex: rename_func("HEX_DECODE_BINARY"),
1124            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
1125            exp.Uuid: rename_func("UUID_STRING"),
1126            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
1127            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
1128            exp.Xor: rename_func("BOOLXOR"),
1129        }
1130
            # JSON path node types this dialect can render; others fall back upstream.
1131        SUPPORTED_JSON_PATH_PARTS = {
1132            exp.JSONPathKey,
1133            exp.JSONPathRoot,
1134            exp.JSONPathSubscript,
1135        }
1136
            # Type spellings that differ from the base generator.
1137        TYPE_MAPPING = {
1138            **generator.Generator.TYPE_MAPPING,
1139            exp.DataType.Type.NESTED: "OBJECT",
1140            exp.DataType.Type.STRUCT: "OBJECT",
1141            exp.DataType.Type.BIGDECIMAL: "DOUBLE",
1142        }
1143
1144        TOKEN_MAPPING = {
1145            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
1146        }
1147
            # Where each CREATE property is emitted relative to the statement.
1148        PROPERTIES_LOCATION = {
1149            **generator.Generator.PROPERTIES_LOCATION,
1150            exp.CredentialsProperty: exp.Properties.Location.POST_WITH,
1151            exp.LocationProperty: exp.Properties.Location.POST_WITH,
1152            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
1153            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
1154            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
1155        }
1156
            # Constructs that cannot appear inside a VALUES table (see values_sql).
1157        UNSUPPORTED_VALUES_EXPRESSIONS = {
1158            exp.Map,
1159            exp.StarMap,
1160            exp.Struct,
1161            exp.VarMap,
1162        }
1163
1164        RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS = (exp.ArrayAgg,)
1165
1166        def with_properties(self, properties: exp.Properties) -> str:
1167            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1168
1169        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1170            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1171                values_as_table = False
1172
1173            return super().values_sql(expression, values_as_table=values_as_table)
1174
1175        def datatype_sql(self, expression: exp.DataType) -> str:
1176            expressions = expression.expressions
1177            if (
1178                expressions
1179                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1180                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1181            ):
1182                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1183                return "OBJECT"
1184
1185            return super().datatype_sql(expression)
1186
1187        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1188            return self.func(
1189                "TO_NUMBER",
1190                expression.this,
1191                expression.args.get("format"),
1192                expression.args.get("precision"),
1193                expression.args.get("scale"),
1194            )
1195
1196        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
1197            milli = expression.args.get("milli")
1198            if milli is not None:
1199                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
1200                expression.set("nano", milli_to_nano)
1201
1202            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1203
1204        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1205            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1206                return self.func("TO_GEOGRAPHY", expression.this)
1207            if expression.is_type(exp.DataType.Type.GEOMETRY):
1208                return self.func("TO_GEOMETRY", expression.this)
1209
1210            return super().cast_sql(expression, safe_prefix=safe_prefix)
1211
1212        def trycast_sql(self, expression: exp.TryCast) -> str:
1213            value = expression.this
1214
1215            if value.type is None:
1216                from sqlglot.optimizer.annotate_types import annotate_types
1217
1218                value = annotate_types(value, dialect=self.dialect)
1219
1220            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
1221                return super().trycast_sql(expression)
1222
1223            # TRY_CAST only works for string values in Snowflake
1224            return self.cast_sql(expression)
1225
1226        def log_sql(self, expression: exp.Log) -> str:
1227            if not expression.expression:
1228                return self.func("LN", expression.this)
1229
1230            return super().log_sql(expression)
1231
1232        def unnest_sql(self, expression: exp.Unnest) -> str:
1233            unnest_alias = expression.args.get("alias")
1234            offset = expression.args.get("offset")
1235
1236            unnest_alias_columns = unnest_alias.columns if unnest_alias else []
1237            value = seq_get(unnest_alias_columns, 0) or exp.to_identifier("value")
1238
1239            columns = [
1240                exp.to_identifier("seq"),
1241                exp.to_identifier("key"),
1242                exp.to_identifier("path"),
1243                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
1244                value,
1245                exp.to_identifier("this"),
1246            ]
1247
1248            if unnest_alias:
1249                unnest_alias.set("columns", columns)
1250            else:
1251                unnest_alias = exp.TableAlias(this="_u", columns=columns)
1252
1253            table_input = self.sql(expression.expressions[0])
1254            if not table_input.startswith("INPUT =>"):
1255                table_input = f"INPUT => {table_input}"
1256
1257            explode = f"TABLE(FLATTEN({table_input}))"
1258            alias = self.sql(unnest_alias)
1259            alias = f" AS {alias}" if alias else ""
1260            value = "" if isinstance(expression.parent, (exp.From, exp.Join)) else f"{value} FROM "
1261
1262            return f"{value}{explode}{alias}"
1263
1264        def show_sql(self, expression: exp.Show) -> str:
1265            terse = "TERSE " if expression.args.get("terse") else ""
1266            history = " HISTORY" if expression.args.get("history") else ""
1267            like = self.sql(expression, "like")
1268            like = f" LIKE {like}" if like else ""
1269
1270            scope = self.sql(expression, "scope")
1271            scope = f" {scope}" if scope else ""
1272
1273            scope_kind = self.sql(expression, "scope_kind")
1274            if scope_kind:
1275                scope_kind = f" IN {scope_kind}"
1276
1277            starts_with = self.sql(expression, "starts_with")
1278            if starts_with:
1279                starts_with = f" STARTS WITH {starts_with}"
1280
1281            limit = self.sql(expression, "limit")
1282
1283            from_ = self.sql(expression, "from")
1284            if from_:
1285                from_ = f" FROM {from_}"
1286
1287            privileges = self.expressions(expression, key="privileges", flat=True)
1288            privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""
1289
1290            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"
1291
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when unknown."""
    # `kind_value` is always truthy here because of the "TABLE" fallback,
    # so the original `if kind_value else ""` guard was dead code.
    kind_value = expression.args.get("kind") or "TABLE"
    kind = f" {kind_value}"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE{kind}{this}{expressions}"
1300
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    # Snowflake spells identity columns as AUTOINCREMENT [START n] [INCREMENT n].
    parts = ["AUTOINCREMENT"]
    for keyword in ("start", "increment"):
        value = expression.args.get(keyword)
        if value:
            parts.append(f"{keyword.upper()} {value}")
    return " ".join(parts)
1309
def cluster_sql(self, expression: exp.Cluster) -> str:
    # Snowflake wraps the clustering keys in parentheses: CLUSTER BY (a, b).
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
1312
def struct_sql(self, expression: exp.Struct) -> str:
    """Transpile STRUCT(...) into OBJECT_CONSTRUCT(key1, value1, key2, value2, ...)."""
    construct_args: t.List[exp.Expression] = []

    for position, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            # Named field: identifier keys become string literals, any other
            # key expression is passed through unchanged.
            if isinstance(field.this, exp.Identifier):
                key: exp.Expression = exp.Literal.string(field.name)
            else:
                key = field.this
            construct_args.append(key)
            construct_args.append(field.expression)
        else:
            # Positional field: synthesize a "_<position>" key.
            construct_args.append(exp.Literal.string(f"_{position}"))
            construct_args.append(field)

    return self.func("OBJECT_CONSTRUCT", *construct_args)
1328
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    # Snowflake's APPROX_PERCENTILE only accepts the value and the quantile.
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
1332
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET including Snowflake stage options and tags."""
    pieces = []

    exprs = self.expressions(expression, flat=True)
    if exprs:
        pieces.append(f" {exprs}")

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        pieces.append(f" STAGE_FILE_FORMAT = ({file_format})")

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    if copy_options:
        pieces.append(f" STAGE_COPY_OPTIONS = ({copy_options})")

    tag = self.expressions(expression, key="tag", flat=True)
    if tag:
        pieces.append(f" TAG {tag}")

    return "SET" + "".join(pieces)
1344
def strtotime_sql(self, expression: exp.StrToTime) -> str:
    """STR_TO_TIME -> [TRY_]TO_TIMESTAMP(value, format).

    The "safe" arg selects Snowflake's error-tolerant TRY_ variant.
    (Added the missing `-> str` annotation for consistency with siblings.)
    """
    safe_prefix = "TRY_" if expression.args.get("safe") else ""
    return self.func(
        f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
    )
1350
def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
    """Transpile TIMESTAMP_SUB by negating the delta and emitting TIMESTAMPADD.

    (Added the missing `-> str` annotation for consistency with siblings.)
    """
    return self.sql(
        exp.TimestampAdd(
            this=expression.this,
            expression=expression.expression * -1,
            unit=expression.unit,
        )
    )
1359
def jsonextract_sql(self, expression: exp.JSONExtract) -> str:
    """JSON_EXTRACT -> GET_PATH(value, path).

    (Added the missing `-> str` annotation for consistency with siblings.)
    """
    this = expression.this

    # JSON strings are valid coming from other dialects such as BQ, so
    # wrap string operands in PARSE_JSON before taking the path.
    return self.func(
        "GET_PATH",
        exp.ParseJSON(this=this) if this.is_string else this,
        expression.expression,
    )
1369
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    # TO_CHAR is given a timestamp operand; cast anything that is not
    # already a TsOrDsToTimestamp node.
    value = expression.this
    if not isinstance(value, exp.TsOrDsToTimestamp):
        value = exp.cast(value, exp.DataType.Type.TIMESTAMP)

    return self.func("TO_CHAR", value, self.format_time(expression))
1376
def datesub_sql(self, expression: exp.DateSub) -> str:
    """DATE_SUB -> DATEADD with the count negated in place."""
    count = expression.expression
    if not count:
        self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")
    else:
        # Negate the delta so DATEADD performs the subtraction.
        count.replace(count * (-1))

    return date_delta_sql("DATEADD")(self, expression)
1385
def select_sql(self, expression: exp.Select) -> str:
    # An OFFSET without a LIMIT is not accepted, so inject LIMIT NULL.
    has_limit = expression.args.get("limit")
    has_offset = expression.args.get("offset")
    if has_offset and not has_limit:
        expression.limit(exp.Null(), copy=False)
    return super().select_sql(expression)
1392
def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
    """Render the createable part of CREATE, special-casing materialized views.

    For materialized views, COPY GRANTS is located *before* the columns list.
    This is in contrast to normal views where COPY GRANTS is located *after*
    the columns list. We default CopyGrantsProperty to POST_SCHEMA which means
    we need to output it POST_NAME if a materialized view is detected.
    ref: https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view#syntax
    ref: https://docs.snowflake.com/en/sql-reference/sql/create-view#syntax
    """
    is_materialized = expression.find(exp.MaterializedProperty)
    copy_grants_property = expression.find(exp.CopyGrantsProperty)

    if expression.kind == "VIEW" and is_materialized and copy_grants_property:
        # Idiom fix: `remove(x)` is equivalent to `pop(index(x))` — both
        # drop the first element comparing equal to `x`.
        locations[exp.Properties.Location.POST_SCHEMA].remove(copy_grants_property)

        this_name = self.sql(expression.this, "this")
        copy_grants = self.sql(copy_grants_property)
        this_schema = self.schema_columns_sql(expression.this)
        this_schema = f"{self.sep()}{this_schema}" if this_schema else ""

        return f"{this_name}{self.sep()}{copy_grants}{this_schema}"

    return super().createable_sql(expression, locations)
1414
def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    """Emit ARRAY_AGG, hoisting any ORDER BY into a WITHIN GROUP clause."""
    inner = expression.this

    # If an ORDER BY clause is present, detach it from ARRAY_AGG so it can
    # be re-attached later as part of the WITHIN GROUP clause.
    order = inner if isinstance(inner, exp.Order) else None
    if order is not None:
        expression.set("this", order.this.pop())

    rendered = super().arrayagg_sql(expression)

    if order is not None:
        rendered = self.sql(exp.WithinGroup(this=rendered, expression=order))

    return rendered
1430
def array_sql(self, expression: exp.Array) -> str:
    """Render ARRAY(...), rewriting `SELECT AS STRUCT` subqueries.

    SELECT AS STRUCT foo AS alias_foo -> ARRAY_AGG(OBJECT_CONSTRUCT('alias_foo', foo))
    """
    head = seq_get(expression.expressions, 0)

    if isinstance(head, exp.Select) and head.text("kind").upper() == "STRUCT":
        construct_args: t.List[exp.Expression] = []
        for projection in head.expressions:
            # Alias case: SELECT AS STRUCT foo AS alias_foo -> OBJECT_CONSTRUCT('alias_foo', foo)
            # Column case: SELECT AS STRUCT foo -> OBJECT_CONSTRUCT('foo', foo)
            value = projection.this if isinstance(projection, exp.Alias) else projection
            construct_args.append(exp.Literal.string(projection.alias_or_name))
            construct_args.append(value)

        array_agg = exp.ArrayAgg(this=_build_object_construct(args=construct_args))

        head.set("kind", None)
        head.set("expressions", [array_agg])

        return self.sql(head.subquery())

    return inline_array_sql(self, expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
ARRAY_SIZE_NAME = 'ARRAY_SIZE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeColumns'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeWith'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Ceil'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConvertToCharset'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CredentialsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EnviromentProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Floor'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Get'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByBucket'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionByTruncate'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Put'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Tags'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingData'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ForceProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, 
<class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseLeftShift'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseRightShift'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeDiff'>: <function timestampdiff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeekIso'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.FileFormatProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function 
Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.Levenshtein'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.MakeInterval'>: <function no_make_interval_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpExtract'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpExtractAll'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.Timestamp'>: <function no_timestamp_sql>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.DATETIME2: 'DATETIME2'>: 'TIMESTAMP', <Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.BLOB: 'BLOB'>: 'VARBINARY', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.SMALLDATETIME: 'SMALLDATETIME'>: 'TIMESTAMP', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'DOUBLE'}
TOKEN_MAPPING = {<TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>: 'AUTOINCREMENT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EncodeProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EnviromentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.IncludeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 
'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: 
<Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StorageHandlerProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Tags'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, 
<class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ForceProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.CredentialsProperty'>: <Location.POST_WITH: 'POST_WITH'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.Map'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.StarMap'>}
RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS = (<class 'sqlglot.expressions.ArrayAgg'>,)
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties inline (unwrapped), space-separated, after a separator."""
    return self.properties(properties, sep=" ", prefix=self.sep(""), wrapped=False)
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Render VALUES, falling back to non-table form when it contains
    constructs Snowflake's VALUES clause cannot express (maps/structs)."""
    contains_unsupported = bool(expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS))
    return super().values_sql(
        expression, values_as_table=values_as_table and not contains_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, collapsing typed struct fields to bare OBJECT."""
    fields = expression.expressions
    has_typed_field = any(isinstance(field, exp.DataType) for field in fields)

    if fields and has_typed_field and expression.is_type(*exp.DataType.STRUCT_TYPES):
        # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
        return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(value[, format[, precision[, scale]]])."""
    opts = expression.args
    return self.func(
        "TO_NUMBER",
        expression.this,
        opts.get("format"),
        opts.get("precision"),
        opts.get("scale"),
    )
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS; Snowflake has no millisecond slot,
    so milliseconds are folded into the nanosecond argument."""
    milli = expression.args.get("milli")
    if milli is not None:
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render CAST; spatial types use Snowflake's conversion functions instead."""
    for spatial_type, func_name in (
        (exp.DataType.Type.GEOGRAPHY, "TO_GEOGRAPHY"),
        (exp.DataType.Type.GEOMETRY, "TO_GEOMETRY"),
    ):
        if expression.is_type(spatial_type):
            return self.func(func_name, expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST for string operands, plain CAST otherwise."""
    operand = expression.this

    # Infer the operand's type on demand so we can tell strings apart.
    if operand.type is None:
        from sqlglot.optimizer.annotate_types import annotate_types

        operand = annotate_types(operand, dialect=self.dialect)

    if not operand.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        # TRY_CAST only works for string values in Snowflake
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; a single-argument LOG is the natural log, spelled LN."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    # Snowflake has no UNNEST; it is emulated with TABLE(FLATTEN(INPUT => ...)).
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    # The caller's first alias column (or "value") names FLATTEN's VALUE output.
    unnest_alias_columns = unnest_alias.columns if unnest_alias else []
    value = seq_get(unnest_alias_columns, 0) or exp.to_identifier("value")

    # FLATTEN exposes six positional output columns; alias them all so the
    # desired one lines up. An explicit OFFSET expression claims the INDEX slot
    # (popped so it is not rendered twice).
    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
        value,
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    # FLATTEN takes its source as a named INPUT => argument; add the keyword
    # unless the rendered expression already carries it.
    table_input = self.sql(expression.expressions[0])
    if not table_input.startswith("INPUT =>"):
        table_input = f"INPUT => {table_input}"

    explode = f"TABLE(FLATTEN({table_input}))"
    alias = self.sql(unnest_alias)
    alias = f" AS {alias}" if alias else ""
    # In FROM/JOIN position the table function stands alone; elsewhere emit a
    # "<value> FROM TABLE(FLATTEN(...))" projection.
    value = "" if isinstance(expression.parent, (exp.From, exp.Join)) else f"{value} FROM "

    return f"{value}{explode}{alias}"
def show_sql(self, expression: exp.Show) -> str:
    """Assemble a SHOW command from its optional modifier clauses."""
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like_sql = self.sql(expression, "like")
    like = f" LIKE {like_sql}" if like_sql else ""

    scope_sql = self.sql(expression, "scope")
    scope = f" {scope_sql}" if scope_sql else ""

    scope_kind_sql = self.sql(expression, "scope_kind")
    scope_kind = f" IN {scope_kind_sql}" if scope_kind_sql else ""

    starts_with_sql = self.sql(expression, "starts_with")
    starts_with = f" STARTS WITH {starts_with_sql}" if starts_with_sql else ""

    # The rendered LIMIT already carries its own leading space.
    limit = self.sql(expression, "limit")

    from_sql = self.sql(expression, "from")
    from_ = f" FROM {from_sql}" if from_sql else ""

    privileges_sql = self.expressions(expression, key="privileges", flat=True)
    privileges = f" WITH PRIVILEGES {privileges_sql}" if privileges_sql else ""

    return (
        f"SHOW {terse}{expression.name}{history}{like}"
        f"{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"
    )
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when unknown."""
    kind = expression.args.get("kind") or "TABLE"
    target = self.sql(expression, "this")
    extra = self.expressions(expression, flat=True)
    extra = f" {extra}" if extra else ""
    return f"DESCRIBE {kind} {target}{extra}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column as AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render Snowflake's parenthesized CLUSTER BY key list."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct literal as OBJECT_CONSTRUCT(key1, val1, key2, val2, ...)."""
    interleaved = []

    for index, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            # Named field: quote identifier keys, keep expression keys as-is.
            key = (
                exp.Literal.string(field.name)
                if isinstance(field.this, exp.Identifier)
                else field.this
            )
            interleaved.extend((key, field.expression))
        else:
            # Positional field: synthesize a "_<i>" key.
            interleaved.extend((exp.Literal.string(f"_{index}"), field))

    return self.func("OBJECT_CONSTRUCT", *interleaved)
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Render APPROX_PERCENTILE; weight/accuracy args are unsupported."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET properties, stage options and tags."""
    props = self.expressions(expression, flat=True)
    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    tags = self.expressions(expression, key="tag", flat=True)

    clauses = [
        f" {props}" if props else "",
        f" STAGE_FILE_FORMAT = ({file_format})" if file_format else "",
        f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else "",
        f" TAG {tags}" if tags else "",
    ]
    return "SET" + "".join(clauses)
def strtotime_sql(self, expression: exp.StrToTime):
    """Render (TRY_)TO_TIMESTAMP with the dialect-mapped format string."""
    func_name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(func_name, expression.this, self.format_time(expression))
def timestampsub_sql(self, expression: exp.TimestampSub):
    """Render timestamp subtraction as TIMESTAMPADD with a negated amount."""
    negated = expression.expression * -1
    add = exp.TimestampAdd(
        this=expression.this,
        expression=negated,
        unit=expression.unit,
    )
    return self.sql(add)
def jsonextract_sql(self, expression: exp.JSONExtract):
    """Render JSON extraction as GET_PATH, parsing string operands first."""
    target = expression.this
    if target.is_string:
        # JSON strings are valid coming from other dialects such as BQ
        target = exp.ParseJSON(this=target)

    return self.func("GET_PATH", target, expression.expression)
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    """Render TO_CHAR, casting the operand to TIMESTAMP unless it already is one."""
    operand = expression.this
    if not isinstance(operand, exp.TsOrDsToTimestamp):
        operand = exp.cast(operand, exp.DataType.Type.TIMESTAMP)

    return self.func("TO_CHAR", operand, self.format_time(expression))
def datesub_sql(self, expression: exp.DateSub) -> str:
    """Render date subtraction as DATEADD with a negated count."""
    count = expression.expression
    if not count:
        self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")
    else:
        count.replace(count * (-1))

    return date_delta_sql("DATEADD")(self, expression)
def select_sql(self, expression: exp.Select) -> str:
    """Render SELECT; Snowflake needs LIMIT NULL when only OFFSET is given."""
    has_limit = expression.args.get("limit")
    has_offset = expression.args.get("offset")
    if has_offset and not has_limit:
        expression.limit(exp.Null(), copy=False)
    return super().select_sql(expression)
def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
    # Detect the materialized-view + COPY GRANTS combination that needs
    # special clause ordering.
    is_materialized = expression.find(exp.MaterializedProperty)
    copy_grants_property = expression.find(exp.CopyGrantsProperty)

    if expression.kind == "VIEW" and is_materialized and copy_grants_property:
        # For materialized views, COPY GRANTS is located *before* the columns list
        # This is in contrast to normal views where COPY GRANTS is located *after* the columns list
        # We default CopyGrantsProperty to POST_SCHEMA which means we need to output it POST_NAME if a materialized view is detected
        # ref: https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view#syntax
        # ref: https://docs.snowflake.com/en/sql-reference/sql/create-view#syntax
        # Remove the property from the shared locations mapping so the base
        # generator does not render it a second time after the schema.
        post_schema_properties = locations[exp.Properties.Location.POST_SCHEMA]
        post_schema_properties.pop(post_schema_properties.index(copy_grants_property))

        # Emit: <name> COPY GRANTS (<columns>)
        this_name = self.sql(expression.this, "this")
        copy_grants = self.sql(copy_grants_property)
        this_schema = self.schema_columns_sql(expression.this)
        this_schema = f"{self.sep()}{this_schema}" if this_schema else ""

        return f"{this_name}{self.sep()}{copy_grants}{this_schema}"

    return super().createable_sql(expression, locations)
def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    """Render ARRAY_AGG, moving any ORDER BY into a WITHIN GROUP clause."""
    operand = expression.this
    order = operand if isinstance(operand, exp.Order) else None

    if order:
        # Detach the ordered operand so ARRAY_AGG renders without the ORDER BY.
        expression.set("this", order.this.pop())

    rendered = super().arrayagg_sql(expression)

    if order:
        rendered = self.sql(exp.WithinGroup(this=rendered, expression=order))

    return rendered
def array_sql(self, expression: exp.Array) -> str:
    # Transpile BigQuery-style ARRAY(SELECT AS STRUCT ...) into
    # (SELECT ARRAY_AGG(OBJECT_CONSTRUCT(...)) ...); otherwise render an
    # inline array literal.
    expressions = expression.expressions

    first_expr = seq_get(expressions, 0)
    if isinstance(first_expr, exp.Select):
        # SELECT AS STRUCT foo AS alias_foo -> ARRAY_AGG(OBJECT_CONSTRUCT('alias_foo', foo))
        if first_expr.text("kind").upper() == "STRUCT":
            object_construct_args = []
            for expr in first_expr.expressions:
                # Alias case: SELECT AS STRUCT foo AS alias_foo -> OBJECT_CONSTRUCT('alias_foo', foo)
                # Column case: SELECT AS STRUCT foo -> OBJECT_CONSTRUCT('foo', foo)
                name = expr.this if isinstance(expr, exp.Alias) else expr

                object_construct_args.extend([exp.Literal.string(expr.alias_or_name), name])

            array_agg = exp.ArrayAgg(
                this=_build_object_construct(args=object_construct_args)
            )

            # Rewrite the SELECT in place: drop the STRUCT kind and project
            # only the aggregated objects, then wrap it as a subquery.
            first_expr.set("kind", None)
            first_expr.set("expressions", [array_agg])

            return self.sql(first_expr.subquery())

    return inline_array_sql(self, expression)
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SUPPORTS_WINDOW_EXCLUDE
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
SUPPORTS_UNIX_SECONDS
PARSE_JSON_NAME
ALTER_SET_TYPE
ARRAY_SIZE_DIM_REQUIRED
TIME_PART_SINGULARS
NAMED_PLACEHOLDER_TOKEN
EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
limitoptions_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablefromrows_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterindex_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
safedivide_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
jsoncast_sql
try_sql
use_sql
binary
ceil_floor
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
whens_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
uniquekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodatetime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonextractquote_sql
jsonexists_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql
attach_sql
detach_sql
attachoption_sql
featuresattime_sql
watermarkcolumnconstraint_sql
encodeproperty_sql
includeproperty_sql
xmlelement_sql
partitionbyrangeproperty_sql
partitionbyrangepropertydynamic_sql
unpivotcolumns_sql
analyzesample_sql
analyzestatistics_sql
analyzehistogram_sql
analyzedelete_sql
analyzelistchainedrows_sql
analyzevalidate_sql
analyze_sql
xmltable_sql
xmlnamespace_sql
export_sql
declare_sql
declareitem_sql
recursivewithsearch_sql
parameterizedagg_sql
anonymousaggfunc_sql
combinedaggfunc_sql
combinedparameterizedagg_sql
get_put_sql
translatecharacters_sql