Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25    no_safe_divide_sql,
  26    no_timestamp_sql,
  27    timestampdiff_sql,
  28    no_make_interval_sql,
  29)
  30from sqlglot.generator import unsupported_args
  31from sqlglot.helper import flatten, is_float, is_int, seq_get
  32from sqlglot.tokens import TokenType
  33
  34if t.TYPE_CHECKING:
  35    from sqlglot._typing import E
  36
  37
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a parser for Snowflake's TO_DATE/TO_TIME/TO_TIMESTAMP family.

    Args:
        name: The Snowflake function name, used for the Anonymous fallback.
        kind: The target data type the function converts to.
        safe: True for the TRY_* variants, which return NULL on failure.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # An integer-like first argument is an epoch value, not a datetime string
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value and not safe:
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # No special handling applies: keep the call opaque so it round-trips as-is
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  75
  76
  77def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  78    expression = parser.build_var_map(args)
  79
  80    if isinstance(expression, exp.StarMap):
  81        return expression
  82
  83    return exp.Struct(
  84        expressions=[
  85            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  86        ]
  87    )
  88
  89
  90def _build_datediff(args: t.List) -> exp.DateDiff:
  91    return exp.DateDiff(
  92        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  93    )
  94
  95
  96def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  97    def _builder(args: t.List) -> E:
  98        return expr_type(
  99            this=seq_get(args, 2),
 100            expression=seq_get(args, 1),
 101            unit=map_date_part(seq_get(args, 0)),
 102        )
 103
 104    return _builder
 105
 106
 107# https://docs.snowflake.com/en/sql-reference/functions/div0
 108def _build_if_from_div0(args: t.List) -> exp.If:
 109    lhs = exp._wrap(seq_get(args, 0), exp.Binary)
 110    rhs = exp._wrap(seq_get(args, 1), exp.Binary)
 111
 112    cond = exp.EQ(this=rhs, expression=exp.Literal.number(0)).and_(
 113        exp.Is(this=lhs, expression=exp.null()).not_()
 114    )
 115    true = exp.Literal.number(0)
 116    false = exp.Div(this=lhs, expression=rhs)
 117    return exp.If(this=cond, true=true, false=false)
 118
 119
 120# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 121def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 122    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 123    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 124
 125
 126# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 127def _build_if_from_nullifzero(args: t.List) -> exp.If:
 128    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
 129    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 130
 131
 132def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 133    flag = expression.text("flag")
 134
 135    if "i" not in flag:
 136        flag += "i"
 137
 138    return self.func(
 139        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 140    )
 141
 142
 143def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 144    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 145
 146    if not regexp_replace.args.get("replacement"):
 147        regexp_replace.set("replacement", exp.Literal.string(""))
 148
 149    return regexp_replace
 150
 151
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Return a SHOW-statement parser that forwards its bound arguments to
    Snowflake.Parser._parse_show_snowflake."""

    def _parse(self: Snowflake.Parser) -> exp.Show:
        return self._parse_show_snowflake(*args, **kwargs)

    return _parse
 157
 158
 159def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 160    trunc = date_trunc_to_time(args)
 161    trunc.set("unit", map_date_part(trunc.args["unit"]))
 162    return trunc
 163
 164
 165def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 166    """
 167    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 168    so we need to unqualify them.
 169
 170    Example:
 171        >>> from sqlglot import parse_one
 172        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 173        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 174        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 175    """
 176    if isinstance(expression, exp.Pivot) and expression.unpivot:
 177        expression = transforms.unqualify_columns(expression)
 178
 179    return expression
 180
 181
 182def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 183    assert isinstance(expression, exp.Create)
 184
 185    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 186        if expression.this in exp.DataType.NESTED_TYPES:
 187            expression.set("expressions", None)
 188        return expression
 189
 190    props = expression.args.get("properties")
 191    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 192        for schema_expression in expression.this.expressions:
 193            if isinstance(schema_expression, exp.ColumnDef):
 194                column_type = schema_expression.kind
 195                if isinstance(column_type, exp.DataType):
 196                    column_type.transform(_flatten_structured_type, copy=False)
 197
 198    return expression
 199
 200
def _unnest_generate_date_array(unnest: exp.Unnest) -> None:
    """Rewrite UNNEST(GENERATE_DATE_ARRAY(start, end, step)) in place into a
    subquery that projects ``start + sequence_value`` over ARRAY_GENERATE_RANGE.

    Only handles the case where start/end are present and the step is an
    interval literal of 1 (any unit); otherwise the node is left untouched.
    """
    generate_date_array = unnest.expressions[0]
    start = generate_date_array.args.get("start")
    end = generate_date_array.args.get("end")
    step = generate_date_array.args.get("step")

    # Bail out unless the step is a 1-unit interval we know how to expand
    if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
        return

    unit = step.args.get("unit")

    unnest_alias = unnest.args.get("alias")
    if unnest_alias:
        # Copy so the alias can be reused on the replacement subquery safely
        unnest_alias = unnest_alias.copy()
        sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
    else:
        sequence_value_name = "value"

    # We'll add the next sequence value to the starting date and project the result
    date_add = _build_date_time_add(exp.DateAdd)(
        [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
    ).as_(sequence_value_name)

    # We use DATEDIFF to compute the number of sequence values needed
    number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
        [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
    )

    unnest.set("expressions", [number_sequence])
    unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))
 231
 232
def _transform_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite GENERATE_DATE_ARRAY usages in a SELECT into Snowflake-friendly SQL.

    Scalar usages are wrapped into (SELECT ARRAY_AGG(*) FROM UNNEST(...))
    subqueries; single-source UNNESTs in FROM/JOIN are expanded in place via
    _unnest_generate_date_array.
    """
    if isinstance(expression, exp.Select):
        for generate_date_array in expression.find_all(exp.GenerateDateArray):
            parent = generate_date_array.parent

            # If GENERATE_DATE_ARRAY is used directly as an array (e.g passed into ARRAY_LENGTH), the transformed Snowflake
            # query is the following (it'll be unnested properly on the next iteration due to copy):
            # SELECT ref(GENERATE_DATE_ARRAY(...)) -> SELECT ref((SELECT ARRAY_AGG(*) FROM UNNEST(GENERATE_DATE_ARRAY(...))))
            if not isinstance(parent, exp.Unnest):
                unnest = exp.Unnest(expressions=[generate_date_array.copy()])
                generate_date_array.replace(
                    exp.select(exp.ArrayAgg(this=exp.Star())).from_(unnest).subquery()
                )

            # Note: `parent` was captured before the replacement above, so this
            # branch only fires for the original UNNEST(...) form
            if (
                isinstance(parent, exp.Unnest)
                and isinstance(parent.parent, (exp.From, exp.Join))
                and len(parent.expressions) == 1
            ):
                _unnest_generate_date_array(parent)

    return expression
 255
 256
 257def _build_regexp_extract(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 258    def _builder(args: t.List) -> E:
 259        return expr_type(
 260            this=seq_get(args, 0),
 261            expression=seq_get(args, 1),
 262            position=seq_get(args, 2),
 263            occurrence=seq_get(args, 3),
 264            parameters=seq_get(args, 4),
 265            group=seq_get(args, 5) or exp.Literal.number(0),
 266        )
 267
 268    return _builder
 269
 270
 271def _regexpextract_sql(self, expression: exp.RegexpExtract | exp.RegexpExtractAll) -> str:
 272    # Other dialects don't support all of the following parameters, so we need to
 273    # generate default values as necessary to ensure the transpilation is correct
 274    group = expression.args.get("group")
 275
 276    # To avoid generating all these default values, we set group to None if
 277    # it's 0 (also default value) which doesn't trigger the following chain
 278    if group and group.name == "0":
 279        group = None
 280
 281    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 282    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 283    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 284
 285    return self.func(
 286        "REGEXP_SUBSTR" if isinstance(expression, exp.RegexpExtract) else "REGEXP_EXTRACT_ALL",
 287        expression.this,
 288        expression.expression,
 289        position,
 290        occurrence,
 291        parameters,
 292        group,
 293    )
 294
 295
 296def _json_extract_value_array_sql(
 297    self: Snowflake.Generator, expression: exp.JSONValueArray | exp.JSONExtractArray
 298) -> str:
 299    json_extract = exp.JSONExtract(this=expression.this, expression=expression.expression)
 300    ident = exp.to_identifier("x")
 301
 302    if isinstance(expression, exp.JSONValueArray):
 303        this: exp.Expression = exp.cast(ident, to=exp.DataType.Type.VARCHAR)
 304    else:
 305        this = exp.ParseJSON(this=f"TO_JSON({ident})")
 306
 307    transform_lambda = exp.Lambda(expressions=[ident], this=this)
 308
 309    return self.func("TRANSFORM", json_extract, transform_lambda)
 310
 311
 312class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake time-format tokens mapped to strftime-style directives; both
    # upper- and lower-case spellings are listed explicitly.
    # NOTE(review): "dd" -> "%-d" and "dy" -> "%w" differ from their uppercase
    # counterparts ("DD" -> "%d", "DY" -> "%a") — confirm this is intentional.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 352
 353    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 354        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 355        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 356        if (
 357            isinstance(expression, exp.Identifier)
 358            and isinstance(expression.parent, exp.Table)
 359            and expression.name.lower() == "dual"
 360        ):
 361            return expression  # type: ignore
 362
 363        return super().quote_identifier(expression, identify=identify)
 364
 365    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # In Snowflake, `col:path` performs VARIANT extraction
        COLON_IS_VARIANT_EXTRACT = True

        # MATCH_CONDITION may appear as an identifier...
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # ...but not as a table alias, while WINDOW may alias a table
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 377
        # Snowflake function names mapped to expression builders, extending the
        # base parser's map
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # ARRAY_CONTAINS takes (value, array); ArrayContains stores (array, value)
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 449
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM gets no special parser here; drop the inherited one
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is not a timestamp-introducing keyword in Snowflake
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }
 464
        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] name, ... is represented as a Set with unset=True
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }
 490
        # SHOW <kind> variants; TERSE forms reuse the same underlying parser
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Keywords that can introduce a column constraint via _parse_with_constraint
        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-character tokens that may appear in staged file paths
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Implicit output columns of LATERAL FLATTEN(...)
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose default IN scope is a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is an identifier, not a table
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Lambda parameters may carry type casts; keep only the names
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 544
 545        def _negate_range(
 546            self, this: t.Optional[exp.Expression] = None
 547        ) -> t.Optional[exp.Expression]:
 548            if not this:
 549                return this
 550
 551            query = this.args.get("query")
 552            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 553                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 554                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 555                # which can produce different results (most likely a SnowFlake bug).
 556                #
 557                # https://docs.snowflake.com/en/sql-reference/functions/in
 558                # Context: https://github.com/tobymao/sqlglot/issues/3890
 559                return self.expression(
 560                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 561                )
 562
 563            return self.expression(exp.Not, this=this)
 564
 565        def _parse_tag(self) -> exp.Tags:
 566            return self.expression(
 567                exp.Tags,
 568                expressions=self._parse_wrapped_csv(self._parse_property),
 569            )
 570
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse Snowflake's [WITH] MASKING POLICY / PROJECTION POLICY / TAG
            column constraints; returns None when nothing matches."""
            # WITH is optional, so step back when it wasn't the triggering token
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # A qualified policy name parses as a Column; normalize to a Dot
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None
 593
 594        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
 595            if self._match(TokenType.TAG):
 596                return self._parse_tag()
 597
 598            return super()._parse_with_property()
 599
 600        def _parse_create(self) -> exp.Create | exp.Command:
 601            expression = super()._parse_create()
 602            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 603                # Replace the Table node with the enclosed Identifier
 604                expression.this.replace(expression.this.this)
 605
 606            return expression
 607
 608        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 609        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 610        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 611            this = self._parse_var() or self._parse_type()
 612
 613            if not this:
 614                return None
 615
 616            self._match(TokenType.COMMA)
 617            expression = self._parse_bitwise()
 618            this = map_date_part(this)
 619            name = this.name.upper()
 620
 621            if name.startswith("EPOCH"):
 622                if name == "EPOCH_MILLISECOND":
 623                    scale = 10**3
 624                elif name == "EPOCH_MICROSECOND":
 625                    scale = 10**6
 626                elif name == "EPOCH_NANOSECOND":
 627                    scale = 10**9
 628                else:
 629                    scale = None
 630
 631                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 632                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 633
 634                if scale:
 635                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 636
 637                return to_unix
 638
 639            return self.expression(exp.Extract, this=this, expression=expression)
 640
 641        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 642            if is_map:
 643                # Keys are strings in Snowflake's objects, see also:
 644                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 645                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 646                return self._parse_slice(self._parse_string())
 647
 648            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 649
 650        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 651            lateral = super()._parse_lateral()
 652            if not lateral:
 653                return lateral
 654
 655            if isinstance(lateral.this, exp.Explode):
 656                table_alias = lateral.args.get("alias")
 657                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 658                if table_alias and not table_alias.args.get("columns"):
 659                    table_alias.set("columns", columns)
 660                elif not table_alias:
 661                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 662
 663            return lateral
 664
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            Staged files (string literals or @stage paths) may carry
            FILE_FORMAT => ... and PATTERN => ... options in parentheses.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Consume the optional (FILE_FORMAT => ..., PATTERN => ...) list
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string or a (possibly qualified) name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 698
 699        def _parse_id_var(
 700            self,
 701            any_token: bool = True,
 702            tokens: t.Optional[t.Collection[TokenType]] = None,
 703        ) -> t.Optional[exp.Expression]:
 704            if self._match_text_seq("IDENTIFIER", "("):
 705                identifier = (
 706                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 707                    or self._parse_string()
 708                )
 709                self._match_r_paren()
 710                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 711
 712            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 713
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW <this> statement into an exp.Show."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit scope keyword: default depends on what is shown
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 751
 752        def _parse_location_property(self) -> exp.LocationProperty:
 753            self._match(TokenType.EQ)
 754            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 755
 756        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 757            # Parse either a subquery or a staged file
 758            return (
 759                self._parse_select(table=True, parse_subquery_alias=False)
 760                if self._match(TokenType.L_PAREN, advance=False)
 761                else self._parse_table_parts()
 762            )
 763
 764        def _parse_location_path(self) -> exp.Var:
 765            parts = [self._advance_any(ignore_reserved=True)]
 766
 767            # We avoid consuming a comma token because external tables like @foo and @bar
 768            # can be joined in a query with a comma separator, as well as closing paren
 769            # in case of subqueries
 770            while self._is_connected() and not self._match_set(
 771                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 772            ):
 773                parts.append(self._advance_any(ignore_reserved=True))
 774
 775            return exp.var("".join(part.text for part in parts if part))
 776
 777        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 778            this = super()._parse_lambda_arg()
 779
 780            if not this:
 781                return this
 782
 783            typ = self._parse_types()
 784
 785            if typ:
 786                return self.expression(exp.Cast, this=this, to=typ)
 787
 788            return this
 789
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer configuration."""

        # Both backslash and a doubled single quote escape inside string literals
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        # Snowflake additionally supports // single-line comments
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no /*+ ... */ optimizer-hint syntax
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement rather than an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 832
    class Generator(generator.Generator):
        """Snowflake-specific SQL generator configuration."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT (...) as EXCLUDE
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"
 856
        # Maps sqlglot expression nodes to their Snowflake SQL renderings
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # ARRAY_CONTAINS takes (element, array), i.e. the reverse argument order
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE has an exclusive end, so add 1 to keep it inclusive
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            # EDITDISTANCE supports only a max-distance extra argument
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }
 959
        # JSON path components that can be rendered natively (others are unsupported)
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Node types that cannot appear inside a VALUES table literal; see values_sql
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 984
 985        def with_properties(self, properties: exp.Properties) -> str:
 986            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 987
 988        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 989            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 990                values_as_table = False
 991
 992            return super().values_sql(expression, values_as_table=values_as_table)
 993
 994        def datatype_sql(self, expression: exp.DataType) -> str:
 995            expressions = expression.expressions
 996            if (
 997                expressions
 998                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 999                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1000            ):
1001                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1002                return "OBJECT"
1003
1004            return super().datatype_sql(expression)
1005
1006        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1007            return self.func(
1008                "TO_NUMBER",
1009                expression.this,
1010                expression.args.get("format"),
1011                expression.args.get("precision"),
1012                expression.args.get("scale"),
1013            )
1014
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a milliseconds arg into nanoseconds.

            Snowflake's TIMESTAMP_FROM_PARTS takes nanoseconds, not milliseconds, so
            the `milli` arg (if present) is popped off the node and re-attached as
            `nano` scaled by 1e6. NOTE: this mutates `expression` in place.
            """
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1022
1023        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1024            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1025                return self.func("TO_GEOGRAPHY", expression.this)
1026            if expression.is_type(exp.DataType.Type.GEOMETRY):
1027                return self.func("TO_GEOMETRY", expression.this)
1028
1029            return super().cast_sql(expression, safe_prefix=safe_prefix)
1030
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, downgrading to CAST for non-string operands.

            Snowflake's TRY_CAST only accepts string inputs, so the operand's type
            is annotated (if unknown) and non-text values fall back to a plain cast.
            """
            value = expression.this

            if value.type is None:
                # Imported lazily to avoid a circular import at module load time
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1044
1045        def log_sql(self, expression: exp.Log) -> str:
1046            if not expression.expression:
1047                return self.func("LN", expression.this)
1048
1049            return super().log_sql(expression)
1050
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN exposes a fixed set of output columns (seq, key, path, index,
            value, this), so the alias's column list is rebuilt to match that order,
            splicing in the caller's offset alias (for index) and value alias where
            provided. NOTE: mutates `offset` (pop) and the alias node in place.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # Column order must match FLATTEN's output: seq, key, path, index, value, this
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1074
        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW statement with its optional modifiers.

            Clause order matters and mirrors Snowflake's grammar:
            SHOW [TERSE] <kind> [HISTORY] [LIKE ...] [IN <scope_kind> <scope>]
            [STARTS WITH ...] [LIMIT ...] [FROM ...].
            """
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            # LIMIT renders with its own leading space, so no prefix is added here
            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1099
1100        def describe_sql(self, expression: exp.Describe) -> str:
1101            # Default to table if kind is unknown
1102            kind_value = expression.args.get("kind") or "TABLE"
1103            kind = f" {kind_value}" if kind_value else ""
1104            this = f" {self.sql(expression, 'this')}"
1105            expressions = self.expressions(expression, flat=True)
1106            expressions = f" {expressions}" if expressions else ""
1107            return f"DESCRIBE{kind}{this}{expressions}"
1108
1109        def generatedasidentitycolumnconstraint_sql(
1110            self, expression: exp.GeneratedAsIdentityColumnConstraint
1111        ) -> str:
1112            start = expression.args.get("start")
1113            start = f" START {start}" if start else ""
1114            increment = expression.args.get("increment")
1115            increment = f" INCREMENT {increment}" if increment else ""
1116            return f"AUTOINCREMENT{start}{increment}"
1117
1118        def cluster_sql(self, expression: exp.Cluster) -> str:
1119            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1120
1121        def struct_sql(self, expression: exp.Struct) -> str:
1122            keys = []
1123            values = []
1124
1125            for i, e in enumerate(expression.expressions):
1126                if isinstance(e, exp.PropertyEQ):
1127                    keys.append(
1128                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1129                    )
1130                    values.append(e.expression)
1131                else:
1132                    keys.append(exp.Literal.string(f"_{i}"))
1133                    values.append(e)
1134
1135            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1136
        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE(<expr>, <quantile>); weight/accuracy are unsupported."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1140
1141        def alterset_sql(self, expression: exp.AlterSet) -> str:
1142            exprs = self.expressions(expression, flat=True)
1143            exprs = f" {exprs}" if exprs else ""
1144            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1145            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1146            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1147            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1148            tag = self.expressions(expression, key="tag", flat=True)
1149            tag = f" TAG {tag}" if tag else ""
1150
1151            return f"SET{exprs}{file_format}{copy_options}{tag}"
1152
1153        def strtotime_sql(self, expression: exp.StrToTime):
1154            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1155            return self.func(
1156                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1157            )
1158
1159        def timestampsub_sql(self, expression: exp.TimestampSub):
1160            return self.sql(
1161                exp.TimestampAdd(
1162                    this=expression.this,
1163                    expression=expression.expression * -1,
1164                    unit=expression.unit,
1165                )
1166            )
1167
1168        def jsonextract_sql(self, expression: exp.JSONExtract):
1169            this = expression.this
1170
1171            # JSON strings are valid coming from other dialects such as BQ
1172            return self.func(
1173                "GET_PATH",
1174                exp.ParseJSON(this=this) if this.is_string else this,
1175                expression.expression,
1176            )
1177
1178        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1179            this = expression.this
1180            if not isinstance(this, exp.TsOrDsToTimestamp):
1181                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1182
1183            return self.func("TO_CHAR", this, self.format_time(expression))
class Snowflake(sqlglot.dialects.dialect.Dialect):
class Snowflake(Dialect):
    """Dialect definition for Snowflake."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Maps Snowflake time-format tokens to strftime-style directives.
    # NOTE(review): some lowercase variants map differently from their uppercase
    # counterparts (e.g. "DD" -> "%d" but "dd" -> "%-d") — confirm intentional.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 353
 354    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 355        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 356        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 357        if (
 358            isinstance(expression, exp.Identifier)
 359            and isinstance(expression.parent, exp.Table)
 360            and expression.name.lower() == "dual"
 361        ):
 362            return expression  # type: ignore
 363
 364        return super().quote_identifier(expression, identify=identify)
 365
    class Parser(parser.Parser):
        """Snowflake-specific parser configuration."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # In Snowflake, `col:path` extracts from VARIANT values
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Maps function names to builders that normalize them into sqlglot nodes
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM uses the generic function-call grammar in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens that may appear inside a staged-file path like @stage/dir/file
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Output column names produced by the FLATTEN table function, in order
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds that are scoped to a schema by default (see _parse_show_snowflake)
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Typed lambda params arrive as casts; unwrap them to bare identifiers
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 545
 546        def _negate_range(
 547            self, this: t.Optional[exp.Expression] = None
 548        ) -> t.Optional[exp.Expression]:
 549            if not this:
 550                return this
 551
 552            query = this.args.get("query")
 553            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 554                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 555                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 556                # which can produce different results (most likely a SnowFlake bug).
 557                #
 558                # https://docs.snowflake.com/en/sql-reference/functions/in
 559                # Context: https://github.com/tobymao/sqlglot/issues/3890
 560                return self.expression(
 561                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 562                )
 563
 564            return self.expression(exp.Not, this=this)
 565
 566        def _parse_tag(self) -> exp.Tags:
 567            return self.expression(
 568                exp.Tags,
 569                expressions=self._parse_wrapped_csv(self._parse_property),
 570            )
 571
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse a Snowflake column constraint: a masking policy, a projection
            policy, or a tag list.

            Returns the constraint expression, or None if no known constraint follows.
            """
            # This parser is registered under WITH, MASKING, PROJECTION and TAG; when
            # the trigger token was not WITH, step back so the keyword itself can be
            # re-matched below.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Policies may be qualified (db.schema.policy); normalize to a Dot.
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None
 594
 595        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
 596            if self._match(TokenType.TAG):
 597                return self._parse_tag()
 598
 599            return super()._parse_with_property()
 600
 601        def _parse_create(self) -> exp.Create | exp.Command:
 602            expression = super()._parse_create()
 603            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 604                # Replace the Table node with the enclosed Identifier
 605                expression.this.replace(expression.this.this)
 606
 607            return expression
 608
 609        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 610        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 611        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 612            this = self._parse_var() or self._parse_type()
 613
 614            if not this:
 615                return None
 616
 617            self._match(TokenType.COMMA)
 618            expression = self._parse_bitwise()
 619            this = map_date_part(this)
 620            name = this.name.upper()
 621
 622            if name.startswith("EPOCH"):
 623                if name == "EPOCH_MILLISECOND":
 624                    scale = 10**3
 625                elif name == "EPOCH_MICROSECOND":
 626                    scale = 10**6
 627                elif name == "EPOCH_NANOSECOND":
 628                    scale = 10**9
 629                else:
 630                    scale = None
 631
 632                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 633                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 634
 635                if scale:
 636                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 637
 638                return to_unix
 639
 640            return self.expression(exp.Extract, this=this, expression=expression)
 641
 642        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 643            if is_map:
 644                # Keys are strings in Snowflake's objects, see also:
 645                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 646                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 647                return self._parse_slice(self._parse_string())
 648
 649            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 650
 651        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 652            lateral = super()._parse_lateral()
 653            if not lateral:
 654                return lateral
 655
 656            if isinstance(lateral.this, exp.Explode):
 657                table_alias = lateral.args.get("alias")
 658                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 659                if table_alias and not table_alias.args.get("columns"):
 660                    table_alias.set("columns", columns)
 661                elif not table_alias:
 662                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 663
 664            return lateral
 665
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @stage path denotes a staged file location.
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Staged files may carry (FILE_FORMAT => ..., PATTERN => ...) options.
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # The format is either a string literal or a named format object.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 699
 700        def _parse_id_var(
 701            self,
 702            any_token: bool = True,
 703            tokens: t.Optional[t.Collection[TokenType]] = None,
 704        ) -> t.Optional[exp.Expression]:
 705            if self._match_text_seq("IDENTIFIER", "("):
 706                identifier = (
 707                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 708                    or self._parse_string()
 709                )
 710                self._match_r_paren()
 711                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 712
 713            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 714
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW <kind> statement.

            Args:
                this: The object kind being shown, e.g. "TABLES" or "PRIMARY KEYS".
            """
            scope = None
            scope_kind = None

            # This will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS,
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit kind keyword: infer schema vs. table scope from the
                    # SHOW kind being parsed.
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 752
 753        def _parse_location_property(self) -> exp.LocationProperty:
 754            self._match(TokenType.EQ)
 755            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 756
 757        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 758            # Parse either a subquery or a staged file
 759            return (
 760                self._parse_select(table=True, parse_subquery_alias=False)
 761                if self._match(TokenType.L_PAREN, advance=False)
 762                else self._parse_table_parts()
 763            )
 764
 765        def _parse_location_path(self) -> exp.Var:
 766            parts = [self._advance_any(ignore_reserved=True)]
 767
 768            # We avoid consuming a comma token because external tables like @foo and @bar
 769            # can be joined in a query with a comma separator, as well as closing paren
 770            # in case of subqueries
 771            while self._is_connected() and not self._match_set(
 772                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 773            ):
 774                parts.append(self._advance_any(ignore_reserved=True))
 775
 776            return exp.var("".join(part.text for part in parts if part))
 777
 778        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 779            this = super()._parse_lambda_arg()
 780
 781            if not this:
 782                return this
 783
 784            typ = self._parse_types()
 785
 786            if typ:
 787                return self.expression(exp.Cast, this=this, to=typ)
 788
 789            return this
 790
    class Tokenizer(tokens.Tokenizer):
        # Both a backslash and a doubled quote escape inside string literals.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        # Snowflake does not support nested block comments.
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage file commands are passed through verbatim.
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no /*+ ... */ hint comments.
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is fully parsed (see Parser.SHOW_PARSERS) rather than kept as a command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 833
    class Generator(generator.Generator):
        # --- Dialect capability flags and syntax constants ---
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # How sqlglot expression nodes are rendered as Snowflake SQL.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array) — arguments swapped.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }

        # JSON path components Snowflake can express natively.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # Snowflake models nested/struct data as semi-structured OBJECTs.
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            # Snowflake has no SET/VOLATILE table properties.
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Nodes that cannot appear inside a VALUES clause (see values_sql).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 985
 986        def with_properties(self, properties: exp.Properties) -> str:
 987            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 988
 989        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 990            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 991                values_as_table = False
 992
 993            return super().values_sql(expression, values_as_table=values_as_table)
 994
 995        def datatype_sql(self, expression: exp.DataType) -> str:
 996            expressions = expression.expressions
 997            if (
 998                expressions
 999                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1000                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1001            ):
1002                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1003                return "OBJECT"
1004
1005            return super().datatype_sql(expression)
1006
1007        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1008            return self.func(
1009                "TO_NUMBER",
1010                expression.this,
1011                expression.args.get("format"),
1012                expression.args.get("precision"),
1013                expression.args.get("scale"),
1014            )
1015
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding milliseconds into nanoseconds.

            Snowflake's TIMESTAMP_FROM_PARTS has no milliseconds argument, so a
            `milli` arg is converted to `nano` (1 ms = 1_000_000 ns).
            """
            milli = expression.args.get("milli")
            if milli is not None:
                # pop() detaches the node so it isn't rendered twice.
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1023
1024        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1025            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1026                return self.func("TO_GEOGRAPHY", expression.this)
1027            if expression.is_type(exp.DataType.Type.GEOMETRY):
1028                return self.func("TO_GEOMETRY", expression.this)
1029
1030            return super().cast_sql(expression, safe_prefix=safe_prefix)
1031
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to CAST for non-string inputs.

            Snowflake's TRY_CAST only accepts string (or unknown-typed) values, so
            anything else is emitted as a plain CAST.
            """
            value = expression.this

            if value.type is None:
                # Imported lazily — presumably to avoid a circular import at module
                # load time (TODO confirm).
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1045
1046        def log_sql(self, expression: exp.Log) -> str:
1047            if not expression.expression:
1048                return self.func("LN", expression.this)
1049
1050            return super().log_sql(expression)
1051
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN outputs the fixed columns (seq, key, path, index, value, this),
            so the table alias is rewritten to cover all six positions.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An explicit OFFSET alias maps onto FLATTEN's `index` column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # Reuse the user's first alias column (if any) for `value`.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1075
1076        def show_sql(self, expression: exp.Show) -> str:
1077            terse = "TERSE " if expression.args.get("terse") else ""
1078            history = " HISTORY" if expression.args.get("history") else ""
1079            like = self.sql(expression, "like")
1080            like = f" LIKE {like}" if like else ""
1081
1082            scope = self.sql(expression, "scope")
1083            scope = f" {scope}" if scope else ""
1084
1085            scope_kind = self.sql(expression, "scope_kind")
1086            if scope_kind:
1087                scope_kind = f" IN {scope_kind}"
1088
1089            starts_with = self.sql(expression, "starts_with")
1090            if starts_with:
1091                starts_with = f" STARTS WITH {starts_with}"
1092
1093            limit = self.sql(expression, "limit")
1094
1095            from_ = self.sql(expression, "from")
1096            if from_:
1097                from_ = f" FROM {from_}"
1098
1099            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1100
1101        def describe_sql(self, expression: exp.Describe) -> str:
1102            # Default to table if kind is unknown
1103            kind_value = expression.args.get("kind") or "TABLE"
1104            kind = f" {kind_value}" if kind_value else ""
1105            this = f" {self.sql(expression, 'this')}"
1106            expressions = self.expressions(expression, flat=True)
1107            expressions = f" {expressions}" if expressions else ""
1108            return f"DESCRIBE{kind}{this}{expressions}"
1109
1110        def generatedasidentitycolumnconstraint_sql(
1111            self, expression: exp.GeneratedAsIdentityColumnConstraint
1112        ) -> str:
1113            start = expression.args.get("start")
1114            start = f" START {start}" if start else ""
1115            increment = expression.args.get("increment")
1116            increment = f" INCREMENT {increment}" if increment else ""
1117            return f"AUTOINCREMENT{start}{increment}"
1118
1119        def cluster_sql(self, expression: exp.Cluster) -> str:
1120            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1121
1122        def struct_sql(self, expression: exp.Struct) -> str:
1123            keys = []
1124            values = []
1125
1126            for i, e in enumerate(expression.expressions):
1127                if isinstance(e, exp.PropertyEQ):
1128                    keys.append(
1129                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1130                    )
1131                    values.append(e.expression)
1132                else:
1133                    keys.append(exp.Literal.string(f"_{i}"))
1134                    values.append(e)
1135
1136            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1137
1138        @unsupported_args("weight", "accuracy")
1139        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1140            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1141
1142        def alterset_sql(self, expression: exp.AlterSet) -> str:
1143            exprs = self.expressions(expression, flat=True)
1144            exprs = f" {exprs}" if exprs else ""
1145            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1146            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1147            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1148            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1149            tag = self.expressions(expression, key="tag", flat=True)
1150            tag = f" TAG {tag}" if tag else ""
1151
1152            return f"SET{exprs}{file_format}{copy_options}{tag}"
1153
1154        def strtotime_sql(self, expression: exp.StrToTime):
1155            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1156            return self.func(
1157                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1158            )
1159
1160        def timestampsub_sql(self, expression: exp.TimestampSub):
1161            return self.sql(
1162                exp.TimestampAdd(
1163                    this=expression.this,
1164                    expression=expression.expression * -1,
1165                    unit=expression.unit,
1166                )
1167            )
1168
1169        def jsonextract_sql(self, expression: exp.JSONExtract):
1170            this = expression.this
1171
1172            # JSON strings are valid coming from other dialects such as BQ
1173            return self.func(
1174                "GET_PATH",
1175                exp.ParseJSON(this=this) if this.is_string else this,
1176                expression.expression,
1177            )
1178
1179        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1180            this = expression.this
1181            if not isinstance(this, exp.TsOrDsToTimestamp):
1182                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1183
1184            return self.func("TO_CHAR", this, self.format_time(expression))
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
354    def quote_identifier(self, expression: E, identify: bool = True) -> E:
355        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
356        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
357        if (
358            isinstance(expression, exp.Identifier)
359            and isinstance(expression.parent, exp.Table)
360            and expression.name.lower() == "dual"
361        ):
362            return expression  # type: ignore
363
364        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two-character text "\n") to its unescaped version (the actual newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        """Snowflake-specific parser: overrides tokens, function builders and
        statement/clause parsers where Snowflake's SQL deviates from the base
        dialect (staged files, SHOW, FLATTEN, VARIANT extraction, etc.)."""

        # PIVOT string values are treated as identifiers.
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # `col:path` is Snowflake's VARIANT extraction syntax.
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # WINDOW may alias a table; MATCH_CONDITION may not.
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Maps Snowflake function names to builders producing canonical AST nodes.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Snowflake's argument order is (value, array); the AST expects (array, value).
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # Snowflake LEN/LENGTH count bytes for binary input, hence binary=True.
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a plain function call in Snowflake.
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is excluded since TO_TIME handles it separately above.
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] a, b, c
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> variants; TERSE prefixes map to the same underlying parser.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single tokens that may appear inside a staged file path (@stage/...).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default output columns produced by LATERAL FLATTEN(...).
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN <scope> refers to a schema rather than a table.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is a bare identifier, not a table reference.
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Strip optional type casts from lambda parameters (x INT -> x).
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }

        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a range expression, special-casing NOT IN (subquery)."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a Snowflake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)

        def _parse_tag(self) -> exp.Tags:
            """Parse a parenthesized, comma-separated TAG property list."""
            return self.expression(
                exp.Tags,
                expressions=self._parse_wrapped_csv(self._parse_property),
            )

        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraints."""
            # WITH is optional; retreat so the keyword after it can be re-matched.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None

        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
            """Parse WITH TAG (...) as a property; defer everything else to the base parser."""
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return super()._parse_with_property()

        def _parse_create(self) -> exp.Create | exp.Command:
            """Parse CREATE, unwrapping the Table node for non-table creatables."""
            expression = super()._parse_create()
            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
                # Replace the Table node with the enclosed Identifier
                expression.this.replace(expression.this.this)

            return expression

        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), mapping EPOCH_* parts to TimeToUnix."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # Epoch variants become UNIX time, scaled to the requested precision.
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse a key inside brackets; object keys must be string literals."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's default column names to its alias."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged file locations (@stage/path)."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional (FILE_FORMAT => ..., PATTERN => ...) options after a staged file.
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting the IDENTIFIER(...) indirection syntax."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW statement (TERSE/HISTORY/LIKE/IN scope/LIMIT/FROM)."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )

        def _parse_location_property(self) -> exp.LocationProperty:
            """Parse LOCATION [=] <stage path>."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_file_location(self) -> t.Optional[exp.Expression]:
            """Parse a COPY file location: either a subquery or a staged file."""
            # Parse either a subquery or a staged file
            return (
                self._parse_select(table=True, parse_subquery_alias=False)
                if self._match(TokenType.L_PAREN, advance=False)
                else self._parse_table_parts()
            )

        def _parse_location_path(self) -> exp.Var:
            """Consume a raw staged-file path (e.g. @stage/dir/file) into a Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))

        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
            """Parse a lambda parameter, wrapping it in a Cast if a type follows."""
            this = super()._parse_lambda_arg()

            if not this:
                return this

            typ = self._parse_types()

            if typ:
                return self.expression(exp.Cast, this=this, to=typ)

            return this

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.ROWS: 'ROWS'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.LIST: 'LIST'>, <TokenType.TIME: 'TIME'>, <TokenType.INET: 'INET'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.VAR: 'VAR'>, <TokenType.DETACH: 'DETACH'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.DATE: 'DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.KEEP: 'KEEP'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.YEAR: 'YEAR'>, <TokenType.COPY: 'COPY'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.MAP: 'MAP'>, <TokenType.BIT: 'BIT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.ANY: 'ANY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.DATE32: 'DATE32'>, <TokenType.ALL: 'ALL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.KILL: 'KILL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.END: 'END'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.MODEL: 'MODEL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.APPLY: 'APPLY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.USE: 'USE'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.BIGDECIMAL: 
'BIGDECIMAL'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.FALSE: 'FALSE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.SOME: 'SOME'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ANTI: 'ANTI'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UINT256: 'UINT256'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.SINK: 'SINK'>, <TokenType.TOP: 'TOP'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.SUPER: 'SUPER'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.VIEW: 'VIEW'>, <TokenType.CHAR: 'CHAR'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.LOAD: 'LOAD'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TEXT: 'TEXT'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.MONEY: 'MONEY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.MERGE: 'MERGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.DELETE: 'DELETE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.RING: 'RING'>, <TokenType.DECIMAL256: 
'DECIMAL256'>, <TokenType.NAME: 'NAME'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DIV: 'DIV'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.NULL: 'NULL'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.TAG: 'TAG'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UINT: 'UINT'>, <TokenType.ASC: 'ASC'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.LEFT: 'LEFT'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.DESC: 'DESC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.CACHE: 'CACHE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.ROW: 'ROW'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, 
<TokenType.SET: 'SET'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.INT: 'INT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.POINT: 'POINT'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.JSON: 'JSON'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.UUID: 'UUID'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ASOF: 'ASOF'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.XML: 'XML'>, <TokenType.INT256: 'INT256'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.INT128: 'INT128'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.IS: 'IS'>, <TokenType.JSONB: 'JSONB'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.NEXT: 'NEXT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.FULL: 'FULL'>}
TABLE_ALIAS_TOKENS = {<TokenType.ROWS: 'ROWS'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.LIST: 'LIST'>, <TokenType.TIME: 'TIME'>, <TokenType.INET: 'INET'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.VAR: 'VAR'>, <TokenType.DETACH: 'DETACH'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.DATE: 'DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.KEEP: 'KEEP'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.YEAR: 'YEAR'>, <TokenType.COPY: 'COPY'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.MAP: 'MAP'>, <TokenType.BIT: 'BIT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.ANY: 'ANY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.ALL: 'ALL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.KILL: 'KILL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.END: 'END'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.MODEL: 'MODEL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.USE: 'USE'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.SHOW: 'SHOW'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.LOWCARDINALITY: 
'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.FALSE: 'FALSE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.SOME: 'SOME'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.ANTI: 'ANTI'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UINT256: 'UINT256'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.SINK: 'SINK'>, <TokenType.TOP: 'TOP'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.SUPER: 'SUPER'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.VIEW: 'VIEW'>, <TokenType.CHAR: 'CHAR'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.LOAD: 'LOAD'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TEXT: 'TEXT'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.MONEY: 'MONEY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.MERGE: 'MERGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.DELETE: 'DELETE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.RING: 'RING'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.NAME: 'NAME'>, <TokenType.OVERLAPS: 'OVERLAPS'>, 
<TokenType.COMMENT: 'COMMENT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DIV: 'DIV'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.NULL: 'NULL'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.TAG: 'TAG'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UINT: 'UINT'>, <TokenType.ASC: 'ASC'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.DESC: 'DESC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.CACHE: 'CACHE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.ROW: 'ROW'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SET: 'SET'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.INT: 'INT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, 
<TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.POINT: 'POINT'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.JSON: 'JSON'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.UUID: 'UUID'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.RANGE: 'RANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.XML: 'XML'>, <TokenType.INT256: 'INT256'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.INT128: 'INT128'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.IS: 'IS'>, <TokenType.JSONB: 'JSONB'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.NEXT: 'NEXT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.DATETIME: 'DATETIME'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function 
Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Contains'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function 
_build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FEATURES_AT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FeaturesAtTime'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Int64'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 
'JSON_EXTRACT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractArray'>>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 
'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAKE_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MakeInterval'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 
'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDatetime'>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'INSTR': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 
'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'EDITDISTANCE': <function Snowflake.Parser.<lambda>>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_SUBSTR_ALL': <function _build_regexp_extract.<locals>._builder>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function 
_build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WATERMARK': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.MOD: 'MOD'>, <TokenType.SLASH: 'SLASH'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'SEQUENCES', 'UNIQUE KEYS', 'IMPORTED KEYS', 'VIEWS', 'TABLES', 'OBJECTS'}
NON_TABLE_CREATABLES = {'WAREHOUSE', 'STREAMLIT', 'STORAGE INTEGRATION', 'TAG'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
OPERATION_MODIFIERS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
791    class Tokenizer(tokens.Tokenizer):
792        STRING_ESCAPES = ["\\", "'"]
793        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
794        RAW_STRINGS = ["$$"]
795        COMMENTS = ["--", "//", ("/*", "*/")]
796        NESTED_COMMENTS = False
797
798        KEYWORDS = {
799            **tokens.Tokenizer.KEYWORDS,
800            "BYTEINT": TokenType.INT,
801            "CHAR VARYING": TokenType.VARCHAR,
802            "CHARACTER VARYING": TokenType.VARCHAR,
803            "EXCLUDE": TokenType.EXCEPT,
804            "ILIKE ANY": TokenType.ILIKE_ANY,
805            "LIKE ANY": TokenType.LIKE_ANY,
806            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
807            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
808            "MINUS": TokenType.EXCEPT,
809            "NCHAR VARYING": TokenType.VARCHAR,
810            "PUT": TokenType.COMMAND,
811            "REMOVE": TokenType.COMMAND,
812            "RM": TokenType.COMMAND,
813            "SAMPLE": TokenType.TABLE_SAMPLE,
814            "SQL_DOUBLE": TokenType.DOUBLE,
815            "SQL_VARCHAR": TokenType.VARCHAR,
816            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
817            "TAG": TokenType.TAG,
818            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
819            "TOP": TokenType.TOP,
820            "WAREHOUSE": TokenType.WAREHOUSE,
821            "STREAMLIT": TokenType.STREAMLIT,
822        }
823        KEYWORDS.pop("/*+")
824
825        SINGLE_TOKENS = {
826            **tokens.Tokenizer.SINGLE_TOKENS,
827            "$": TokenType.PARAMETER,
828        }
829
830        VAR_SINGLE_TOKENS = {"$"}
831
832        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': 
<TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': 
<TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 
'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 
'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': 
<TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.FETCH: 'FETCH'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.RENAME: 'RENAME'>, <TokenType.EXECUTE: 'EXECUTE'>}
class Snowflake.Generator(sqlglot.generator.Generator):
    class Generator(generator.Generator):
        """SQL generator for the Snowflake dialect.

        Declares Snowflake-specific feature flags, the expression-to-SQL
        transform table, and type/property mappings consumed by the base
        ``generator.Generator`` machinery.
        """

        # Session/bind parameters are spelled with a leading "$" (e.g. $1).
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        # COLLATE is rendered as a function call rather than an infix keyword.
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake's SELECT * EXCEPT equivalent is spelled EXCLUDE.
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Per-expression SQL rewrites; extends (and overrides) the base table.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Note the swapped argument order: ARRAY_CONTAINS(<value>, <array>).
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end bound is exclusive, hence the + 1.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            # EDITDISTANCE only takes the two strings; cost arguments are dropped.
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }

        # JSON path segments Snowflake can express natively.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # Snowflake has no NESTED/STRUCT types; both degrade to OBJECT.
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # VALUES rows containing these cannot be rendered as a VALUES table.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 985
 986        def with_properties(self, properties: exp.Properties) -> str:
 987            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 988
 989        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 990            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 991                values_as_table = False
 992
 993            return super().values_sql(expression, values_as_table=values_as_table)
 994
 995        def datatype_sql(self, expression: exp.DataType) -> str:
 996            expressions = expression.expressions
 997            if (
 998                expressions
 999                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1000                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1001            ):
1002                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1003                return "OBJECT"
1004
1005            return super().datatype_sql(expression)
1006
1007        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1008            return self.func(
1009                "TO_NUMBER",
1010                expression.this,
1011                expression.args.get("format"),
1012                expression.args.get("precision"),
1013                expression.args.get("scale"),
1014            )
1015
1016        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
1017            milli = expression.args.get("milli")
1018            if milli is not None:
1019                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
1020                expression.set("nano", milli_to_nano)
1021
1022            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1023
1024        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1025            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1026                return self.func("TO_GEOGRAPHY", expression.this)
1027            if expression.is_type(exp.DataType.Type.GEOMETRY):
1028                return self.func("TO_GEOMETRY", expression.this)
1029
1030            return super().cast_sql(expression, safe_prefix=safe_prefix)
1031
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, degrading to a plain cast for non-string inputs.

            Snowflake's TRY_CAST only accepts string inputs, so the source value's
            type must be known; when it hasn't been annotated yet, run the type
            annotator on it first.
            """
            value = expression.this

            if value.type is None:
                # Lazy import to avoid a module-level dependency on the optimizer.
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            # UNKNOWN is treated optimistically as a possible string.
            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1045
1046        def log_sql(self, expression: exp.Log) -> str:
1047            if not expression.expression:
1048                return self.func("LN", expression.this)
1049
1050            return super().log_sql(expression)
1051
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)).

            FLATTEN produces a fixed six-column row, so the table alias must
            name all six positions; the user's offset alias (if any) lands in
            the INDEX slot and their first column alias in the VALUE slot.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # Reuse (and detach) the explicit offset alias, else default to "index".
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # First user-supplied alias column names the VALUE position.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1075
1076        def show_sql(self, expression: exp.Show) -> str:
1077            terse = "TERSE " if expression.args.get("terse") else ""
1078            history = " HISTORY" if expression.args.get("history") else ""
1079            like = self.sql(expression, "like")
1080            like = f" LIKE {like}" if like else ""
1081
1082            scope = self.sql(expression, "scope")
1083            scope = f" {scope}" if scope else ""
1084
1085            scope_kind = self.sql(expression, "scope_kind")
1086            if scope_kind:
1087                scope_kind = f" IN {scope_kind}"
1088
1089            starts_with = self.sql(expression, "starts_with")
1090            if starts_with:
1091                starts_with = f" STARTS WITH {starts_with}"
1092
1093            limit = self.sql(expression, "limit")
1094
1095            from_ = self.sql(expression, "from")
1096            if from_:
1097                from_ = f" FROM {from_}"
1098
1099            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1100
1101        def describe_sql(self, expression: exp.Describe) -> str:
1102            # Default to table if kind is unknown
1103            kind_value = expression.args.get("kind") or "TABLE"
1104            kind = f" {kind_value}" if kind_value else ""
1105            this = f" {self.sql(expression, 'this')}"
1106            expressions = self.expressions(expression, flat=True)
1107            expressions = f" {expressions}" if expressions else ""
1108            return f"DESCRIBE{kind}{this}{expressions}"
1109
1110        def generatedasidentitycolumnconstraint_sql(
1111            self, expression: exp.GeneratedAsIdentityColumnConstraint
1112        ) -> str:
1113            start = expression.args.get("start")
1114            start = f" START {start}" if start else ""
1115            increment = expression.args.get("increment")
1116            increment = f" INCREMENT {increment}" if increment else ""
1117            return f"AUTOINCREMENT{start}{increment}"
1118
1119        def cluster_sql(self, expression: exp.Cluster) -> str:
1120            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1121
1122        def struct_sql(self, expression: exp.Struct) -> str:
1123            keys = []
1124            values = []
1125
1126            for i, e in enumerate(expression.expressions):
1127                if isinstance(e, exp.PropertyEQ):
1128                    keys.append(
1129                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1130                    )
1131                    values.append(e.expression)
1132                else:
1133                    keys.append(exp.Literal.string(f"_{i}"))
1134                    values.append(e)
1135
1136            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1137
1138        @unsupported_args("weight", "accuracy")
1139        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1140            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1141
1142        def alterset_sql(self, expression: exp.AlterSet) -> str:
1143            exprs = self.expressions(expression, flat=True)
1144            exprs = f" {exprs}" if exprs else ""
1145            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1146            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1147            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1148            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1149            tag = self.expressions(expression, key="tag", flat=True)
1150            tag = f" TAG {tag}" if tag else ""
1151
1152            return f"SET{exprs}{file_format}{copy_options}{tag}"
1153
1154        def strtotime_sql(self, expression: exp.StrToTime):
1155            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1156            return self.func(
1157                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1158            )
1159
1160        def timestampsub_sql(self, expression: exp.TimestampSub):
1161            return self.sql(
1162                exp.TimestampAdd(
1163                    this=expression.this,
1164                    expression=expression.expression * -1,
1165                    unit=expression.unit,
1166                )
1167            )
1168
1169        def jsonextract_sql(self, expression: exp.JSONExtract):
1170            this = expression.this
1171
1172            # JSON strings are valid coming from other dialects such as BQ
1173            return self.func(
1174                "GET_PATH",
1175                exp.ParseJSON(this=this) if this.is_string else this,
1176                expression.expression,
1177            )
1178
1179        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1180            this = expression.this
1181            if not isinstance(this, exp.TsOrDsToTimestamp):
1182                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1183
1184            return self.func("TO_CHAR", this, self.format_time(expression))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
ARRAY_SIZE_NAME = 'ARRAY_SIZE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Tags'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeDiff'>: <function timestampdiff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: 
<function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.MakeInterval'>: <function no_make_interval_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpExtract'>: <function _regexpextract_sql>, <class 
'sqlglot.expressions.RegexpExtractAll'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SafeDivide'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.Timestamp'>: <function no_timestamp_sql>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 
'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Levenshtein'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EncodeProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.IncludeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Tags'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.Map'>, <class 'sqlglot.expressions.VarMap'>}
def with_properties(self, properties: exp.Properties) -> str:
    """Render CREATE-level properties unwrapped, led by a separator and joined with spaces."""
    leading = self.sep("")
    return self.properties(properties, wrapped=False, prefix=leading, sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Generate a VALUES clause, disabling the table-style rendering when the
    rows contain any construct listed in UNSUPPORTED_VALUES_EXPRESSIONS."""
    has_unsupported = bool(expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS))
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Collapse struct types with typed fields to a bare OBJECT; delegate
    every other type to the base generator."""
    fields = expression.expressions
    typed_struct = (
        bool(fields)
        and expression.is_type(*exp.DataType.STRUCT_TYPES)
        and any(isinstance(field, exp.DataType) for field in fields)
    )
    if typed_struct:
        # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
        return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Emit TO_NUMBER(value[, format[, precision[, scale]]]), omitting
    whichever optional arguments are absent."""
    args = expression.args
    optional = (args.get("format"), args.get("precision"), args.get("scale"))
    return self.func("TO_NUMBER", expression.this, *optional)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Emit TIMESTAMP_FROM_PARTS, folding a milliseconds argument into the
    nanoseconds slot since the function takes no separate milli parameter."""
    milli = expression.args.get("milli")
    if milli is not None:
        # 1 millisecond == 1_000_000 nanoseconds
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render casts, using TO_GEOGRAPHY / TO_GEOMETRY function calls for
    spatial target types instead of CAST syntax."""
    for data_type, func_name in (
        (exp.DataType.Type.GEOGRAPHY, "TO_GEOGRAPHY"),
        (exp.DataType.Type.GEOMETRY, "TO_GEOMETRY"),
    ):
        if expression.is_type(data_type):
            return self.func(func_name, expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST only when the input is string-typed (or unknown);
    otherwise fall back to a plain cast, since TRY_CAST only works for
    string values in Snowflake."""
    value = expression.this

    if value.type is None:
        # Type info is required to decide; annotate on demand.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    if not value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """A single-argument LOG is the natural logarithm, which Snowflake spells LN."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) form.

    The alias column list is padded to six names so positions line up with
    FLATTEN's output columns; a popped OFFSET alias takes the index slot and
    any user-supplied first alias column takes the value slot.
    """
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    index_col = (
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index")
    )
    value_col = (
        seq_get(unnest_alias.columns if unnest_alias else [], 0)
        or exp.to_identifier("value")
    )
    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_col,
        value_col,
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    flatten_sql = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias_sql = self.sql(unnest_alias)
    return f"{flatten_sql} AS {alias_sql}" if alias_sql else flatten_sql
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command with its optional modifiers
    (TERSE, HISTORY, LIKE, IN <scope>, STARTS WITH, LIMIT, FROM)."""
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like = self.sql(expression, "like")
    if like:
        like = f" LIKE {like}"

    scope = self.sql(expression, "scope")
    if scope:
        scope = f" {scope}"

    scope_kind = self.sql(expression, "scope_kind")
    scope_kind = f" IN {scope_kind}" if scope_kind else ""

    starts_with = self.sql(expression, "starts_with")
    starts_with = f" STARTS WITH {starts_with}" if starts_with else ""

    # The limit SQL already carries its own leading keyword/spacing.
    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    from_ = f" FROM {from_}" if from_ else ""

    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when unspecified."""
    # Default to table if kind is unknown
    kind_value = expression.args.get("kind") or "TABLE"
    this = self.sql(expression, "this")
    exprs = self.expressions(expression, flat=True)
    suffix = f" {exprs}" if exprs else ""
    return f"DESCRIBE {kind_value} {this}{suffix}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Identity columns are spelled AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]
    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")
    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")
    return " ".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Snowflake clustering keys are rendered as CLUSTER BY (k1, k2, ...)."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Transpile STRUCT literals to OBJECT_CONSTRUCT(key1, value1, key2, value2, ...).

    Named fields (PropertyEQ) contribute their name as a string key; purely
    positional fields are keyed as "_<position>".
    """
    pairs: t.List[exp.Expression] = []

    for i, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            if isinstance(field.this, exp.Identifier):
                pairs.append(exp.Literal.string(field.name))
            else:
                pairs.append(field.this)
            pairs.append(field.expression)
        else:
            pairs.append(exp.Literal.string(f"_{i}"))
            pairs.append(field)

    return self.func("OBJECT_CONSTRUCT", *pairs)
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Approximate quantiles map to APPROX_PERCENTILE(expr, quantile);
    the weight/accuracy arguments are flagged unsupported by the decorator."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET with optional stage file format, stage copy
    options, and tag clauses appended in that order."""
    exprs = self.expressions(expression, flat=True)
    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    tag = self.expressions(expression, key="tag", flat=True)

    return (
        "SET"
        + (f" {exprs}" if exprs else "")
        + (f" STAGE_FILE_FORMAT = ({file_format})" if file_format else "")
        + (f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else "")
        + (f" TAG {tag}" if tag else "")
    )
def strtotime_sql(self, expression: exp.StrToTime):
    """STR_TO_TIME becomes TO_TIMESTAMP, or TRY_TO_TIMESTAMP when the safe
    flag is set."""
    name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(name, expression.this, self.format_time(expression))
def timestampsub_sql(self, expression: exp.TimestampSub):
    """There is no subtraction counterpart here: negate the interval amount
    and re-render as a TimestampAdd."""
    negated = expression.expression * -1
    add = exp.TimestampAdd(
        this=expression.this,
        expression=negated,
        unit=expression.unit,
    )
    return self.sql(add)
def jsonextract_sql(self, expression: exp.JSONExtract):
    """Render JSON extraction as GET_PATH, wrapping string inputs in
    PARSE_JSON first — JSON strings are valid coming from other dialects
    such as BQ."""
    this = expression.this
    if this.is_string:
        this = exp.ParseJSON(this=this)

    return self.func("GET_PATH", this, expression.expression)
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    """Format times via TO_CHAR, casting the input to TIMESTAMP unless it is
    already a TsOrDsToTimestamp node."""
    this = expression.this
    if not isinstance(this, exp.TsOrDsToTimestamp):
        this = exp.cast(this, exp.DataType.Type.TIMESTAMP)

    fmt = self.format_time(expression)
    return self.func("TO_CHAR", this, fmt)
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
SUPPORTS_UNIX_SECONDS
PARSE_JSON_NAME
ARRAY_SIZE_DIM_REQUIRED
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodatetime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql
attach_sql
detach_sql
attachoption_sql
featuresattime_sql
watermarkcolumnconstraint_sql
encodeproperty_sql
includeproperty_sql