Edit on GitHub

sqlglot.dialects.bigquery

   1from __future__ import annotations
   2
   3import logging
   4import re
   5import typing as t
   6
   7
   8from sqlglot.optimizer.annotate_types import TypeAnnotator
   9
  10from sqlglot import exp, generator, jsonpath, parser, tokens, transforms
  11from sqlglot._typing import E
  12from sqlglot.dialects.dialect import (
  13    Dialect,
  14    NormalizationStrategy,
  15    annotate_with_type_lambda,
  16    arg_max_or_min_no_count,
  17    binary_from_function,
  18    date_add_interval_sql,
  19    datestrtodate_sql,
  20    build_formatted_time,
  21    filter_array_using_unnest,
  22    if_sql,
  23    inline_array_unless_query,
  24    max_or_greatest,
  25    min_or_least,
  26    no_ilike_sql,
  27    build_date_delta_with_interval,
  28    regexp_replace_sql,
  29    rename_func,
  30    sha256_sql,
  31    timestrtotime_sql,
  32    ts_or_ds_add_cast,
  33    unit_to_var,
  34    strposition_sql,
  35    groupconcat_sql,
  36)
  37from sqlglot.helper import seq_get, split_num_words
  38from sqlglot.tokens import TokenType
  39from sqlglot.generator import unsupported_args
  40
  41if t.TYPE_CHECKING:
  42    from sqlglot._typing import Lit
  43
  44    from sqlglot.optimizer.annotate_types import TypeAnnotator
  45
logger = logging.getLogger("sqlglot")


# Union of the three JSON-extraction expression types handled by _json_extract_sql
JSON_EXTRACT_TYPE = t.Union[exp.JSONExtract, exp.JSONExtractScalar, exp.JSONExtractArray]

# JSON functions whose path keys escape double quotes instead of using bracket quoting
DQUOTES_ESCAPING_JSON_FUNCTIONS = ("JSON_QUERY", "JSON_VALUE", "JSON_QUERY_ARRAY")
  52
  53
  54def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
  55    if not expression.find_ancestor(exp.From, exp.Join):
  56        return self.values_sql(expression)
  57
  58    structs = []
  59    alias = expression.args.get("alias")
  60    for tup in expression.find_all(exp.Tuple):
  61        field_aliases = (
  62            alias.columns
  63            if alias and alias.columns
  64            else (f"_c{i}" for i in range(len(tup.expressions)))
  65        )
  66        expressions = [
  67            exp.PropertyEQ(this=exp.to_identifier(name), expression=fld)
  68            for name, fld in zip(field_aliases, tup.expressions)
  69        ]
  70        structs.append(exp.Struct(expressions=expressions))
  71
  72    # Due to `UNNEST_COLUMN_ONLY`, it is expected that the table alias be contained in the columns expression
  73    alias_name_only = exp.TableAlias(columns=[alias.this]) if alias else None
  74    return self.unnest_sql(
  75        exp.Unnest(expressions=[exp.array(*structs, copy=False)], alias=alias_name_only)
  76    )
  77
  78
  79def _returnsproperty_sql(self: BigQuery.Generator, expression: exp.ReturnsProperty) -> str:
  80    this = expression.this
  81    if isinstance(this, exp.Schema):
  82        this = f"{self.sql(this, 'this')} <{self.expressions(this)}>"
  83    else:
  84        this = self.sql(this)
  85    return f"RETURNS {this}"
  86
  87
  88def _create_sql(self: BigQuery.Generator, expression: exp.Create) -> str:
  89    returns = expression.find(exp.ReturnsProperty)
  90    if expression.kind == "FUNCTION" and returns and returns.args.get("is_table"):
  91        expression.set("kind", "TABLE FUNCTION")
  92
  93        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
  94            expression.set("expression", expression.expression.this)
  95
  96    return self.create_sql(expression)
  97
  98
  99# https://issuetracker.google.com/issues/162294746
 100# workaround for bigquery bug when grouping by an expression and then ordering
 101# WITH x AS (SELECT 1 y)
 102# SELECT y + 1 z
 103# FROM x
 104# GROUP BY x + 1
 105# ORDER by z
 106def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
 107    if isinstance(expression, exp.Select):
 108        group = expression.args.get("group")
 109        order = expression.args.get("order")
 110
 111        if group and order:
 112            aliases = {
 113                select.this: select.args["alias"]
 114                for select in expression.selects
 115                if isinstance(select, exp.Alias)
 116            }
 117
 118            for grouped in group.expressions:
 119                if grouped.is_int:
 120                    continue
 121                alias = aliases.get(grouped)
 122                if alias:
 123                    grouped.replace(exp.column(alias))
 124
 125    return expression
 126
 127
def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        if cte_query.is_star:
            # A star projection can't be mapped onto explicit column names without schema info
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        # Drop the CTE's column list; the names are re-applied as projection aliases below
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression
 153
 154
 155def _build_parse_timestamp(args: t.List) -> exp.StrToTime:
 156    this = build_formatted_time(exp.StrToTime, "bigquery")([seq_get(args, 1), seq_get(args, 0)])
 157    this.set("zone", seq_get(args, 2))
 158    return this
 159
 160
 161def _build_timestamp(args: t.List) -> exp.Timestamp:
 162    timestamp = exp.Timestamp.from_arg_list(args)
 163    timestamp.set("with_tz", True)
 164    return timestamp
 165
 166
 167def _build_date(args: t.List) -> exp.Date | exp.DateFromParts:
 168    expr_type = exp.DateFromParts if len(args) == 3 else exp.Date
 169    return expr_type.from_arg_list(args)
 170
 171
 172def _build_to_hex(args: t.List) -> exp.Hex | exp.MD5:
 173    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
 174    arg = seq_get(args, 0)
 175    return exp.MD5(this=arg.this) if isinstance(arg, exp.MD5Digest) else exp.LowerHex(this=arg)
 176
 177
 178def _build_json_strip_nulls(args: t.List) -> exp.JSONStripNulls:
 179    expression = exp.JSONStripNulls(this=seq_get(args, 0))
 180
 181    for arg in args[1:]:
 182        if isinstance(arg, exp.Kwarg):
 183            expression.set(arg.this.name.lower(), arg)
 184        else:
 185            expression.set("expression", arg)
 186
 187    return expression
 188
 189
 190def _array_contains_sql(self: BigQuery.Generator, expression: exp.ArrayContains) -> str:
 191    return self.sql(
 192        exp.Exists(
 193            this=exp.select("1")
 194            .from_(exp.Unnest(expressions=[expression.left]).as_("_unnest", table=["_col"]))
 195            .where(exp.column("_col").eq(expression.right))
 196        )
 197    )
 198
 199
 200def _ts_or_ds_add_sql(self: BigQuery.Generator, expression: exp.TsOrDsAdd) -> str:
 201    return date_add_interval_sql("DATE", "ADD")(self, ts_or_ds_add_cast(expression))
 202
 203
def _ts_or_ds_diff_sql(self: BigQuery.Generator, expression: exp.TsOrDsDiff) -> str:
    """Render TS_OR_DS_DIFF as DATE_DIFF, casting both operands to TIMESTAMP."""
    # The casts are applied in place so the subsequent attribute accesses see them
    expression.this.replace(exp.cast(expression.this, exp.DataType.Type.TIMESTAMP))
    expression.expression.replace(exp.cast(expression.expression, exp.DataType.Type.TIMESTAMP))
    unit = unit_to_var(expression)
    return self.func("DATE_DIFF", expression.this, expression.expression, unit)
 209
 210
 211def _unix_to_time_sql(self: BigQuery.Generator, expression: exp.UnixToTime) -> str:
 212    scale = expression.args.get("scale")
 213    timestamp = expression.this
 214
 215    if scale in (None, exp.UnixToTime.SECONDS):
 216        return self.func("TIMESTAMP_SECONDS", timestamp)
 217    if scale == exp.UnixToTime.MILLIS:
 218        return self.func("TIMESTAMP_MILLIS", timestamp)
 219    if scale == exp.UnixToTime.MICROS:
 220        return self.func("TIMESTAMP_MICROS", timestamp)
 221
 222    unix_seconds = exp.cast(
 223        exp.Div(this=timestamp, expression=exp.func("POW", 10, scale)), exp.DataType.Type.BIGINT
 224    )
 225    return self.func("TIMESTAMP_SECONDS", unix_seconds)
 226
 227
 228def _build_time(args: t.List) -> exp.Func:
 229    if len(args) == 1:
 230        return exp.TsOrDsToTime(this=args[0])
 231    if len(args) == 2:
 232        return exp.Time.from_arg_list(args)
 233    return exp.TimeFromParts.from_arg_list(args)
 234
 235
 236def _build_datetime(args: t.List) -> exp.Func:
 237    if len(args) == 1:
 238        return exp.TsOrDsToDatetime.from_arg_list(args)
 239    if len(args) == 2:
 240        return exp.Datetime.from_arg_list(args)
 241    return exp.TimestampFromParts.from_arg_list(args)
 242
 243
 244def _build_regexp_extract(
 245    expr_type: t.Type[E], default_group: t.Optional[exp.Expression] = None
 246) -> t.Callable[[t.List], E]:
 247    def _builder(args: t.List) -> E:
 248        try:
 249            group = re.compile(args[1].name).groups == 1
 250        except re.error:
 251            group = False
 252
 253        # Default group is used for the transpilation of REGEXP_EXTRACT_ALL
 254        return expr_type(
 255            this=seq_get(args, 0),
 256            expression=seq_get(args, 1),
 257            position=seq_get(args, 2),
 258            occurrence=seq_get(args, 3),
 259            group=exp.Literal.number(1) if group else default_group,
 260        )
 261
 262    return _builder
 263
 264
 265def _build_extract_json_with_default_path(expr_type: t.Type[E]) -> t.Callable[[t.List, Dialect], E]:
 266    def _builder(args: t.List, dialect: Dialect) -> E:
 267        if len(args) == 1:
 268            # The default value for the JSONPath is '$' i.e all of the data
 269            args.append(exp.Literal.string("$"))
 270        return parser.build_extract_json_with_path(expr_type)(args, dialect)
 271
 272    return _builder
 273
 274
def _str_to_datetime_sql(
    self: BigQuery.Generator, expression: exp.StrToDate | exp.StrToTime
) -> str:
    """Render STR_TO_DATE/STR_TO_TIME as PARSE_DATE/PARSE_TIMESTAMP, or as a
    SAFE_CAST with a FORMAT clause when the expression is marked safe."""
    this = self.sql(expression, "this")
    dtype = "DATE" if isinstance(expression, exp.StrToDate) else "TIMESTAMP"

    if expression.args.get("safe"):
        # SAFE_CAST takes the format in BigQuery's own format-element syntax,
        # hence the inverse mapping back from the normalized strftime form
        fmt = self.format_time(
            expression,
            self.dialect.INVERSE_FORMAT_MAPPING,
            self.dialect.INVERSE_FORMAT_TRIE,
        )
        return f"SAFE_CAST({this} AS {dtype} FORMAT {fmt})"

    fmt = self.format_time(expression)
    return self.func(f"PARSE_{dtype}", fmt, this, expression.args.get("zone"))
 291
 292
 293def _annotate_math_functions(self: TypeAnnotator, expression: E) -> E:
 294    """
 295    Many BigQuery math functions such as CEIL, FLOOR etc follow this return type convention:
 296    +---------+---------+---------+------------+---------+
 297    |  INPUT  | INT64   | NUMERIC | BIGNUMERIC | FLOAT64 |
 298    +---------+---------+---------+------------+---------+
 299    |  OUTPUT | FLOAT64 | NUMERIC | BIGNUMERIC | FLOAT64 |
 300    +---------+---------+---------+------------+---------+
 301    """
 302    self._annotate_args(expression)
 303
 304    this: exp.Expression = expression.this
 305
 306    self._set_type(
 307        expression,
 308        exp.DataType.Type.DOUBLE if this.is_type(*exp.DataType.INTEGER_TYPES) else this.type,
 309    )
 310    return expression
 311
 312
# NOTE(review): "perncentile" is a typo for "percentile", but the name is
# referenced elsewhere in this module, so it is kept for compatibility.
def _annotate_perncentile_cont(
    self: TypeAnnotator, expression: exp.PercentileCont
) -> exp.PercentileCont:
    """Annotate PERCENTILE_CONT: the result type is the coercion of both args.

    +------------+-----------+------------+---------+
    | INPUT      | NUMERIC   | BIGNUMERIC | FLOAT64 |
    +------------+-----------+------------+---------+
    | NUMERIC    | NUMERIC   | BIGNUMERIC | FLOAT64 |
    | BIGNUMERIC | BIGNUMERIC| BIGNUMERIC | FLOAT64 |
    | FLOAT64    | FLOAT64   | FLOAT64    | FLOAT64 |
    +------------+-----------+------------+---------+
    """
    self._annotate_args(expression)

    self._set_type(expression, self._maybe_coerce(expression.this.type, expression.expression.type))
    return expression
 329
 330
 331def _annotate_by_args_approx_top(self: TypeAnnotator, expression: exp.ApproxTopK) -> exp.ApproxTopK:
 332    self._annotate_args(expression)
 333
 334    struct_type = exp.DataType(
 335        this=exp.DataType.Type.STRUCT,
 336        expressions=[expression.this.type, exp.DataType(this=exp.DataType.Type.BIGINT)],
 337        nested=True,
 338    )
 339    self._set_type(
 340        expression,
 341        exp.DataType(this=exp.DataType.Type.ARRAY, expressions=[struct_type], nested=True),
 342    )
 343
 344    return expression
 345
 346
 347@unsupported_args("ins_cost", "del_cost", "sub_cost")
 348def _levenshtein_sql(self: BigQuery.Generator, expression: exp.Levenshtein) -> str:
 349    max_dist = expression.args.get("max_dist")
 350    if max_dist:
 351        max_dist = exp.Kwarg(this=exp.var("max_distance"), expression=max_dist)
 352
 353    return self.func("EDIT_DISTANCE", expression.this, expression.expression, max_dist)
 354
 355
 356def _build_levenshtein(args: t.List) -> exp.Levenshtein:
 357    max_dist = seq_get(args, 2)
 358    return exp.Levenshtein(
 359        this=seq_get(args, 0),
 360        expression=seq_get(args, 1),
 361        max_dist=max_dist.expression if max_dist else None,
 362    )
 363
 364
 365def _build_format_time(expr_type: t.Type[exp.Expression]) -> t.Callable[[t.List], exp.TimeToStr]:
 366    def _builder(args: t.List) -> exp.TimeToStr:
 367        return exp.TimeToStr(
 368            this=expr_type(this=seq_get(args, 1)),
 369            format=seq_get(args, 0),
 370            zone=seq_get(args, 2),
 371        )
 372
 373    return _builder
 374
 375
 376def _build_contains_substring(args: t.List) -> exp.Contains:
 377    # Lowercase the operands in case of transpilation, as exp.Contains
 378    # is case-sensitive on other dialects
 379    this = exp.Lower(this=seq_get(args, 0))
 380    expr = exp.Lower(this=seq_get(args, 1))
 381
 382    return exp.Contains(this=this, expression=expr, json_scope=seq_get(args, 2))
 383
 384
 385def _json_extract_sql(self: BigQuery.Generator, expression: JSON_EXTRACT_TYPE) -> str:
 386    name = (expression._meta and expression.meta.get("name")) or expression.sql_name()
 387    upper = name.upper()
 388
 389    dquote_escaping = upper in DQUOTES_ESCAPING_JSON_FUNCTIONS
 390
 391    if dquote_escaping:
 392        self._quote_json_path_key_using_brackets = False
 393
 394    sql = rename_func(upper)(self, expression)
 395
 396    if dquote_escaping:
 397        self._quote_json_path_key_using_brackets = True
 398
 399    return sql
 400
 401
 402def _annotate_concat(self: TypeAnnotator, expression: exp.Concat) -> exp.Concat:
 403    annotated = self._annotate_by_args(expression, "expressions")
 404
 405    # Args must be BYTES or types that can be cast to STRING, return type is either BYTES or STRING
 406    # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#concat
 407    if not annotated.is_type(exp.DataType.Type.BINARY, exp.DataType.Type.UNKNOWN):
 408        annotated.type = exp.DataType.Type.VARCHAR
 409
 410    return annotated
 411
 412
def _annotate_array(self: TypeAnnotator, expression: exp.Array) -> exp.Array:
    """Annotate ARRAY(...), special-casing ARRAY(<single-projection subquery>).

    BigQuery behaves as follows:

    SELECT t, TYPEOF(t) FROM (SELECT 'foo') AS t            -- foo, STRUCT<STRING>
    SELECT ARRAY(SELECT 'foo'), TYPEOF(ARRAY(SELECT 'foo')) -- foo, ARRAY<STRING>
    """
    array_args = expression.expressions

    # A single-projection subquery yields ARRAY<projection type> rather than an
    # array of one-column structs, so pull the projection type out of the
    # query's STRUCT annotation when it is known
    if (
        len(array_args) == 1
        and isinstance(select := array_args[0].unnest(), exp.Select)
        and (query_type := select.meta.get("query_type")) is not None
        and query_type.is_type(exp.DataType.Type.STRUCT)
        and len(query_type.expressions) == 1
        and isinstance(col_def := query_type.expressions[0], exp.ColumnDef)
        and (projection_type := col_def.kind) is not None
        and not projection_type.is_type(exp.DataType.Type.UNKNOWN)
    ):
        array_type = exp.DataType(
            this=exp.DataType.Type.ARRAY,
            expressions=[projection_type.copy()],
            nested=True,
        )
        return self._annotate_with_type(expression, array_type)

    return self._annotate_by_args(expression, "expressions", array=True)
 438
 439
 440class BigQuery(Dialect):
 441    WEEK_OFFSET = -1
 442    UNNEST_COLUMN_ONLY = True
 443    SUPPORTS_USER_DEFINED_TYPES = False
 444    SUPPORTS_SEMI_ANTI_JOIN = False
 445    LOG_BASE_FIRST = False
 446    HEX_LOWERCASE = True
 447    FORCE_EARLY_ALIAS_REF_EXPANSION = True
 448    PRESERVE_ORIGINAL_NAMES = True
 449    HEX_STRING_IS_INTEGER_TYPE = True
 450
 451    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
 452    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
 453
 454    # bigquery udfs are case sensitive
 455    NORMALIZE_FUNCTIONS = False
 456
 457    # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
 458    TIME_MAPPING = {
 459        "%D": "%m/%d/%y",
 460        "%E6S": "%S.%f",
 461        "%e": "%-d",
 462    }
 463
 464    FORMAT_MAPPING = {
 465        "DD": "%d",
 466        "MM": "%m",
 467        "MON": "%b",
 468        "MONTH": "%B",
 469        "YYYY": "%Y",
 470        "YY": "%y",
 471        "HH": "%I",
 472        "HH12": "%I",
 473        "HH24": "%H",
 474        "MI": "%M",
 475        "SS": "%S",
 476        "SSSSS": "%f",
 477        "TZH": "%z",
 478    }
 479
 480    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
 481    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
 482    # https://cloud.google.com/bigquery/docs/querying-wildcard-tables#scanning_a_range_of_tables_using_table_suffix
 483    # https://cloud.google.com/bigquery/docs/query-cloud-storage-data#query_the_file_name_pseudo-column
 484    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE", "_TABLE_SUFFIX", "_FILE_NAME"}
 485
 486    # All set operations require either a DISTINCT or ALL specifier
 487    SET_OP_DISTINCT_BY_DEFAULT = dict.fromkeys((exp.Except, exp.Intersect, exp.Union), None)
 488
 489    # https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions#percentile_cont
 490    COERCES_TO = {
 491        **TypeAnnotator.COERCES_TO,
 492        exp.DataType.Type.BIGDECIMAL: {exp.DataType.Type.DOUBLE},
 493    }
 494    COERCES_TO[exp.DataType.Type.DECIMAL] |= {exp.DataType.Type.BIGDECIMAL}
 495
 496    # BigQuery maps Type.TIMESTAMP to DATETIME, so we need to amend the inferred types
 497    TYPE_TO_EXPRESSIONS = {
 498        **Dialect.TYPE_TO_EXPRESSIONS,
 499        exp.DataType.Type.TIMESTAMPTZ: Dialect.TYPE_TO_EXPRESSIONS[exp.DataType.Type.TIMESTAMP],
 500    }
 501    TYPE_TO_EXPRESSIONS.pop(exp.DataType.Type.TIMESTAMP)
 502
 503    ANNOTATORS = {
 504        **Dialect.ANNOTATORS,
 505        **{
 506            expr_type: annotate_with_type_lambda(data_type)
 507            for data_type, expressions in TYPE_TO_EXPRESSIONS.items()
 508            for expr_type in expressions
 509        },
 510        **{
 511            expr_type: lambda self, e: _annotate_math_functions(self, e)
 512            for expr_type in (exp.Floor, exp.Ceil, exp.Log, exp.Ln, exp.Sqrt, exp.Exp, exp.Round)
 513        },
 514        **{
 515            expr_type: lambda self, e: self._annotate_by_args(e, "this")
 516            for expr_type in (
 517                exp.Left,
 518                exp.Right,
 519                exp.Lower,
 520                exp.Upper,
 521                exp.Pad,
 522                exp.Trim,
 523                exp.RegexpExtract,
 524                exp.RegexpReplace,
 525                exp.Repeat,
 526                exp.Substring,
 527            )
 528        },
 529        exp.ApproxTopSum: lambda self, e: _annotate_by_args_approx_top(self, e),
 530        exp.ApproxTopK: lambda self, e: _annotate_by_args_approx_top(self, e),
 531        exp.ApproxQuantiles: lambda self, e: self._annotate_by_args(e, "this", array=True),
 532        exp.ArgMax: lambda self, e: self._annotate_by_args(e, "this"),
 533        exp.ArgMin: lambda self, e: self._annotate_by_args(e, "this"),
 534        exp.Array: _annotate_array,
 535        exp.ArrayConcat: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
 536        exp.Ascii: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 537        exp.BitwiseAndAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 538        exp.BitwiseOrAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 539        exp.BitwiseXorAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 540        exp.BitwiseCountAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 541        exp.ByteLength: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 542        exp.ByteString: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
 543        exp.CodePointsToBytes: lambda self, e: self._annotate_with_type(
 544            e, exp.DataType.Type.BINARY
 545        ),
 546        exp.CodePointsToString: lambda self, e: self._annotate_with_type(
 547            e, exp.DataType.Type.VARCHAR
 548        ),
 549        exp.Concat: _annotate_concat,
 550        exp.Contains: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BOOLEAN),
 551        exp.Corr: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
 552        exp.CovarPop: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
 553        exp.CovarSamp: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
 554        exp.CumeDist: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
 555        exp.DateFromUnixDate: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DATE),
 556        exp.DateTrunc: lambda self, e: self._annotate_by_args(e, "this"),
 557        exp.DenseRank: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 558        exp.FarmFingerprint: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 559        exp.FirstValue: lambda self, e: self._annotate_by_args(e, "this"),
 560        exp.Unhex: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
 561        exp.Float64: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
 562        exp.Format: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
 563        exp.GenerateTimestampArray: lambda self, e: self._annotate_with_type(
 564            e, exp.DataType.build("ARRAY<TIMESTAMP>", dialect="bigquery")
 565        ),
 566        exp.Grouping: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 567        exp.IgnoreNulls: lambda self, e: self._annotate_by_args(e, "this"),
 568        exp.JSONArray: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
 569        exp.JSONArrayAppend: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
 570        exp.JSONArrayInsert: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
 571        exp.JSONBool: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BOOLEAN),
 572        exp.JSONExtractScalar: lambda self, e: self._annotate_with_type(
 573            e, exp.DataType.Type.VARCHAR
 574        ),
 575        exp.JSONExtract: lambda self, e: self._annotate_by_args(e, "this"),
 576        exp.JSONExtractArray: lambda self, e: self._annotate_by_args(e, "this", array=True),
 577        exp.JSONFormat: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
 578        exp.JSONKeysAtDepth: lambda self, e: self._annotate_with_type(
 579            e, exp.DataType.build("ARRAY<VARCHAR>", dialect="bigquery")
 580        ),
 581        exp.JSONObject: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
 582        exp.JSONRemove: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
 583        exp.JSONSet: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
 584        exp.JSONStripNulls: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
 585        exp.JSONType: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
 586        exp.JSONValueArray: lambda self, e: self._annotate_with_type(
 587            e, exp.DataType.build("ARRAY<VARCHAR>", dialect="bigquery")
 588        ),
 589        exp.Lag: lambda self, e: self._annotate_by_args(e, "this", "default"),
 590        exp.Lead: lambda self, e: self._annotate_by_args(e, "this"),
 591        exp.LowerHex: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
 592        exp.LaxBool: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BOOLEAN),
 593        exp.LaxFloat64: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
 594        exp.LaxInt64: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 595        exp.LaxString: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
 596        exp.MD5Digest: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
 597        exp.Normalize: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
 598        exp.NthValue: lambda self, e: self._annotate_by_args(e, "this"),
 599        exp.Ntile: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 600        exp.ParseTime: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.TIME),
 601        exp.ParseDatetime: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DATETIME),
 602        exp.ParseBignumeric: lambda self, e: self._annotate_with_type(
 603            e, exp.DataType.Type.BIGDECIMAL
 604        ),
 605        exp.ParseNumeric: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DECIMAL),
 606        exp.PercentileCont: lambda self, e: _annotate_perncentile_cont(self, e),
 607        exp.PercentileDisc: lambda self, e: self._annotate_by_args(e, "this"),
 608        exp.PercentRank: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
 609        exp.Rank: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 610        exp.RegexpExtractAll: lambda self, e: self._annotate_by_args(e, "this", array=True),
 611        exp.RegexpInstr: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 612        exp.Replace: lambda self, e: self._annotate_by_args(e, "this"),
 613        exp.RespectNulls: lambda self, e: self._annotate_by_args(e, "this"),
 614        exp.Reverse: lambda self, e: self._annotate_by_args(e, "this"),
 615        exp.RowNumber: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 616        exp.SafeConvertBytesToString: lambda self, e: self._annotate_with_type(
 617            e, exp.DataType.Type.VARCHAR
 618        ),
 619        exp.Soundex: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
 620        exp.SHA: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
 621        exp.SHA2: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
 622        exp.Sign: lambda self, e: self._annotate_by_args(e, "this"),
 623        exp.Split: lambda self, e: self._annotate_by_args(e, "this", array=True),
 624        exp.TimestampFromParts: lambda self, e: self._annotate_with_type(
 625            e, exp.DataType.Type.DATETIME
 626        ),
 627        exp.TimestampTrunc: lambda self, e: self._annotate_by_args(e, "this"),
 628        exp.TimeFromParts: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.TIME),
 629        exp.TimeTrunc: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.TIME),
 630        exp.ToCodePoints: lambda self, e: self._annotate_with_type(
 631            e, exp.DataType.build("ARRAY<BIGINT>", dialect="bigquery")
 632        ),
 633        exp.TsOrDsToTime: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.TIME),
 634        exp.Translate: lambda self, e: self._annotate_by_args(e, "this"),
 635        exp.Unicode: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
 636    }
 637
    def normalize_identifier(self, expression: E) -> E:
        """Lowercase identifiers under the case-insensitive strategy, except for
        names BigQuery treats as case-sensitive (UDFs and qualified tables)."""
        if (
            isinstance(expression, exp.Identifier)
            and self.normalization_strategy is NormalizationStrategy.CASE_INSENSITIVE
        ):
            parent = expression.parent
            # Walk up dotted references to find the node that owns this identifier
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
            # by default. The following check uses a heuristic to detect tables based on whether
            # they are qualified. This should generally be correct, because tables in BigQuery
            # must be qualified with at least a dataset, unless @@dataset_id is set.
            case_sensitive = (
                isinstance(parent, exp.UserDefinedFunction)
                or (
                    isinstance(parent, exp.Table)
                    and parent.db
                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
                )
                or expression.meta.get("is_table")
            )
            if not case_sensitive:
                expression.set("this", expression.this.lower())

            return t.cast(E, expression)

        return super().normalize_identifier(expression)
 666
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # DASH is accepted in addition to VAR when tokenizing JSON paths
        # (presumably to allow dashes in unquoted path keys — TODO confirm)
        VAR_TOKENS = {
            TokenType.DASH,
            TokenType.VAR,
        }
 672
    class Tokenizer(tokens.Tokenizer):
        # Single/double quotes plus their triple-quoted variants
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        # 0x/0X hex literals with no closing delimiter
        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b/B prefix combined with every quote style yields a byte string
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r/R prefix combined with every quote style yields a raw string
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BYTEINT": TokenType.INT,
            "BYTES": TokenType.BINARY,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            # BigQuery's DATETIME is a civil (zoneless) timestamp; TIMESTAMP is zoned
            "DATETIME": TokenType.TIMESTAMP,
            "DECLARE": TokenType.DECLARE,
            "ELSEIF": TokenType.COMMAND,
            "EXCEPTION": TokenType.COMMAND,
            "EXPORT": TokenType.EXPORT,
            "FLOAT64": TokenType.DOUBLE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "LOOP": TokenType.COMMAND,
            "MODEL": TokenType.MODEL,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "RECORD": TokenType.STRUCT,
            "REPEAT": TokenType.COMMAND,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "WHILE": TokenType.COMMAND,
        }
        # These default keywords don't apply to BigQuery and must tokenize as plain text
        KEYWORDS.pop("DIV")
        KEYWORDS.pop("VALUES")
        KEYWORDS.pop("/*+")
 717
    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True
        # Single-argument LOG is treated as the natural logarithm
        LOG_DEFAULTS_TO_LN = True
        SUPPORTS_IMPLICIT_UNNEST = True
        JOINS_HAVE_EQUAL_PRECEDENCE = True

        # BigQuery does not allow ASC/DESC to be used as an identifier
        ID_VAR_TOKENS = parser.Parser.ID_VAR_TOKENS - {TokenType.ASC, TokenType.DESC}
        ALIAS_TOKENS = parser.Parser.ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        COMMENT_TABLE_ALIAS_TOKENS = parser.Parser.COMMENT_TABLE_ALIAS_TOKENS - {
            TokenType.ASC,
            TokenType.DESC,
        }
        UPDATE_ALIAS_TOKENS = parser.Parser.UPDATE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
 733
        # Maps BigQuery function names to builders for sqlglot's canonical
        # expression nodes
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_TOP_COUNT": exp.ApproxTopK.from_arg_list,
            "BOOL": exp.JSONBool.from_arg_list,
            "CONTAINS_SUBSTR": _build_contains_substring,
            "DATE": _build_date,
            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=seq_get(args, 1),
                this=seq_get(args, 0),
                zone=seq_get(args, 2),
            ),
            "DATETIME": _build_datetime,
            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "EDIT_DISTANCE": _build_levenshtein,
            "FORMAT_DATE": _build_format_time(exp.TsOrDsToDate),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "JSON_EXTRACT_SCALAR": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_EXTRACT_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_EXTRACT_STRING_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            "JSON_KEYS": exp.JSONKeysAtDepth.from_arg_list,
            "JSON_QUERY": parser.build_extract_json_with_path(exp.JSONExtract),
            "JSON_QUERY_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_STRIP_NULLS": _build_json_strip_nulls,
            "JSON_VALUE": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_VALUE_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            # BigQuery's LENGTH counts bytes for BYTES input, hence binary=True
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "MD5": exp.MD5Digest.from_arg_list,
            "NORMALIZE_AND_CASEFOLD": lambda args: exp.Normalize(
                this=seq_get(args, 0), form=seq_get(args, 1), is_casefold=True
            ),
            "OCTET_LENGTH": exp.ByteLength.from_arg_list,
            "TO_HEX": _build_to_hex,
            # PARSE_* functions take (format, value); the canonical nodes expect
            # (value, format), so the arguments are swapped here
            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIME": lambda args: build_formatted_time(exp.ParseTime, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _build_parse_timestamp,
            "PARSE_DATETIME": lambda args: build_formatted_time(exp.ParseDatetime, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(
                exp.RegexpExtractAll, default_group=exp.Literal.number(0)
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "STRPOS": exp.StrPosition.from_arg_list,
            "TIME": _build_time,
            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP": _build_timestamp,
            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
            ),
            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
            "FORMAT_DATETIME": _build_format_time(exp.TsOrDsToDatetime),
            "FORMAT_TIMESTAMP": _build_format_time(exp.TsOrDsToTimestamp),
            "FORMAT_TIME": _build_format_time(exp.TsOrDsToTime),
            "FROM_HEX": exp.Unhex.from_arg_list,
            "WEEK": lambda args: exp.WeekStart(this=exp.var(seq_get(args, 0))),
        }
 814
        # Functions whose argument lists need custom parsing (non-standard syntax
        # such as `MODEL ...` or `TABLE ...` arguments)
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
            "JSON_ARRAY": lambda self: self.expression(
                exp.JSONArray, expressions=self._parse_csv(self._parse_bitwise)
            ),
            "MAKE_INTERVAL": lambda self: self._parse_make_interval(),
            "PREDICT": lambda self: self._parse_predict(),
            "FEATURES_AT_TIME": lambda self: self._parse_features_at_time(),
            "GENERATE_EMBEDDING": lambda self: self._parse_generate_embedding(),
            "VECTOR_SEARCH": lambda self: self._parse_vector_search(),
        }
        # BigQuery's TRIM doesn't use the `TRIM(<chars> FROM <str>)` syntax
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        # Tokens that terminate a dash-separated table-name part (see
        # _parse_table_part for BigQuery's dashed table names)
        DASHED_TABLE_PART_FOLLOW_TOKENS = {TokenType.DOT, TokenType.L_PAREN, TokenType.R_PAREN}

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
            TokenType.END: lambda self: self._parse_as_command(self._prev),
            TokenType.FOR: lambda self: self._parse_for_in(),
            TokenType.EXPORT: lambda self: self._parse_export_data(),
            TokenType.DECLARE: lambda self: self._parse_declare(),
        }

        # Maps array-subscript wrappers to (index base, safe-access flag), e.g.
        # arr[SAFE_ORDINAL(1)] is 1-based and returns NULL when out of bounds
        BRACKET_OFFSETS = {
            "OFFSET": (0, False),
            "ORDINAL": (1, False),
            "SAFE_OFFSET": (0, True),
            "SAFE_ORDINAL": (1, True),
        }
 874
 875        def _parse_for_in(self) -> t.Union[exp.ForIn, exp.Command]:
 876            index = self._index
 877            this = self._parse_range()
 878            self._match_text_seq("DO")
 879            if self._match(TokenType.COMMAND):
 880                self._retreat(index)
 881                return self._parse_as_command(self._prev)
 882            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())
 883
        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one part of a table name, supporting BigQuery's dashed names.

            BigQuery allows unquoted, dash-separated project/table names such as
            `proj-123.dataset.tbl`, so consecutive tokens joined by dashes are
            folded back into a single Identifier here.
            """
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Keep consuming `-<tokens>` runs while the next token is a dash
                while self._match(TokenType.DASH, advance=False) and self._next:
                    start = self._curr
                    # Advance until a token that can't be part of a dashed name
                    # (dot, parens) or a gap in the token stream
                    while self._is_connected() and not self._match_set(
                        self.DASHED_TABLE_PART_FOLLOW_TOKENS, advance=False
                    ):
                        self._advance()

                    # No progress was made, so stop to avoid an infinite loop
                    if start == self._curr:
                        break

                    # Append the raw SQL text spanning the consumed tokens
                    table_name += self._find_sql(start, self._prev)

                this = exp.Identifier(
                    this=table_name, quoted=this.args.get("quoted")
                ).update_positions(this)
            elif isinstance(this, exp.Literal):
                # A part like `123abc` tokenizes as a number followed by a var;
                # glue them back together into a quoted identifier
                table_name = this.name

                if self._is_connected() and self._parse_var(any_token=True):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True).update_positions(this)

            return this
 914
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a (possibly dashed/dotted) BigQuery table reference.

            Post-processes the generic result to (1) unravel float-like tokens
            such as `proj-1.db.tbl`, (2) re-split quoted dotted parts into
            catalog/db/this, and (3) merge `INFORMATION_SCHEMA.<view>` into a
            single quoted identifier.
            """
            table = super()._parse_table_parts(
                schema=schema, is_db_reference=is_db_reference, wildcard=True
            )

            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
            if not table.catalog:
                if table.db:
                    previous_db = table.args["db"]
                    parts = table.db.split(".")
                    if len(parts) == 2 and not table.args["db"].quoted:
                        table.set(
                            "catalog", exp.Identifier(this=parts[0]).update_positions(previous_db)
                        )
                        table.set("db", exp.Identifier(this=parts[1]).update_positions(previous_db))
                else:
                    previous_this = table.this
                    parts = table.name.split(".")
                    if len(parts) == 2 and not table.this.quoted:
                        table.set(
                            "db", exp.Identifier(this=parts[0]).update_positions(previous_this)
                        )
                        table.set(
                            "this", exp.Identifier(this=parts[1]).update_positions(previous_this)
                        )

            # A quoted part may itself contain dots (e.g. `proj.db.tbl`), in which
            # case the name is re-split into at most 3 leading parts plus the rest
            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
                alias = table.this
                catalog, db, this, *rest = (
                    exp.to_identifier(p, quoted=True)
                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
                )

                for part in (catalog, db, this):
                    if part:
                        part.update_positions(table.this)

                if rest and this:
                    this = exp.Dot.build([this, *rest])  # type: ignore

                table = exp.Table(
                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
                )
                table.meta["quoted_table"] = True
            else:
                alias = None

            # The `INFORMATION_SCHEMA` views in BigQuery need to be qualified by a region or
            # dataset, so if the project identifier is omitted we need to fix the ast so that
            # the `INFORMATION_SCHEMA.X` bit is represented as a single (quoted) Identifier.
            # Otherwise, we wouldn't correctly qualify a `Table` node that references these
            # views, because it would seem like the "catalog" part is set, when it'd actually
            # be the region/dataset. Merging the two identifiers into a single one is done to
            # avoid producing a 4-part Table reference, which would cause issues in the schema
            # module, when there are 3-part table names mixed with information schema views.
            #
            # See: https://cloud.google.com/bigquery/docs/information-schema-intro#syntax
            table_parts = table.parts
            if len(table_parts) > 1 and table_parts[-2].name.upper() == "INFORMATION_SCHEMA":
                # We need to alias the table here to avoid breaking existing qualified columns.
                # This is expected to be safe, because if there's an actual alias coming up in
                # the token stream, it will overwrite this one. If there isn't one, we are only
                # exposing the name that can be used to reference the view explicitly (a no-op).
                exp.alias_(
                    table,
                    t.cast(exp.Identifier, alias or table_parts[-1]),
                    table=True,
                    copy=False,
                )

                info_schema_view = f"{table_parts[-2].name}.{table_parts[-1].name}"
                new_this = exp.Identifier(this=info_schema_view, quoted=True).update_positions(
                    line=table_parts[-2].meta.get("line"),
                    col=table_parts[-1].meta.get("col"),
                    start=table_parts[-2].meta.get("start"),
                    end=table_parts[-1].meta.get("end"),
                )
                table.set("this", new_this)
                table.set("db", seq_get(table_parts, -3))
                table.set("catalog", seq_get(table_parts, -4))

            return table
 999
1000        def _parse_column(self) -> t.Optional[exp.Expression]:
1001            column = super()._parse_column()
1002            if isinstance(column, exp.Column):
1003                parts = column.parts
1004                if any("." in p.name for p in parts):
1005                    catalog, db, table, this, *rest = (
1006                        exp.to_identifier(p, quoted=True)
1007                        for p in split_num_words(".".join(p.name for p in parts), ".", 4)
1008                    )
1009
1010                    if rest and this:
1011                        this = exp.Dot.build([this, *rest])  # type: ignore
1012
1013                    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
1014                    column.meta["quoted_column"] = True
1015
1016            return column
1017
1018        @t.overload
1019        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...
1020
1021        @t.overload
1022        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...
1023
1024        def _parse_json_object(self, agg=False):
1025            json_object = super()._parse_json_object()
1026            array_kv_pair = seq_get(json_object.expressions, 0)
1027
1028            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
1029            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
1030            if (
1031                array_kv_pair
1032                and isinstance(array_kv_pair.this, exp.Array)
1033                and isinstance(array_kv_pair.expression, exp.Array)
1034            ):
1035                keys = array_kv_pair.this.expressions
1036                values = array_kv_pair.expression.expressions
1037
1038                json_object.set(
1039                    "expressions",
1040                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
1041                )
1042
1043            return json_object
1044
1045        def _parse_bracket(
1046            self, this: t.Optional[exp.Expression] = None
1047        ) -> t.Optional[exp.Expression]:
1048            bracket = super()._parse_bracket(this)
1049
1050            if this is bracket:
1051                return bracket
1052
1053            if isinstance(bracket, exp.Bracket):
1054                for expression in bracket.expressions:
1055                    name = expression.name.upper()
1056
1057                    if name not in self.BRACKET_OFFSETS:
1058                        break
1059
1060                    offset, safe = self.BRACKET_OFFSETS[name]
1061                    bracket.set("offset", offset)
1062                    bracket.set("safe", safe)
1063                    expression.replace(expression.expressions[0])
1064
1065            return bracket
1066
1067        def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
1068            unnest = super()._parse_unnest(with_alias=with_alias)
1069
1070            if not unnest:
1071                return None
1072
1073            unnest_expr = seq_get(unnest.expressions, 0)
1074            if unnest_expr:
1075                from sqlglot.optimizer.annotate_types import annotate_types
1076
1077                unnest_expr = annotate_types(unnest_expr, dialect=self.dialect)
1078
1079                # Unnesting a nested array (i.e array of structs) explodes the top-level struct fields,
1080                # in contrast to other dialects such as DuckDB which flattens only the array by default
1081                if unnest_expr.is_type(exp.DataType.Type.ARRAY) and any(
1082                    array_elem.is_type(exp.DataType.Type.STRUCT)
1083                    for array_elem in unnest_expr._type.expressions
1084                ):
1085                    unnest.set("explode_array", True)
1086
1087            return unnest
1088
1089        def _parse_make_interval(self) -> exp.MakeInterval:
1090            expr = exp.MakeInterval()
1091
1092            for arg_key in expr.arg_types:
1093                value = self._parse_lambda()
1094
1095                if not value:
1096                    break
1097
1098                # Non-named arguments are filled sequentially, (optionally) followed by named arguments
1099                # that can appear in any order e.g MAKE_INTERVAL(1, minute => 5, day => 2)
1100                if isinstance(value, exp.Kwarg):
1101                    arg_key = value.this.name
1102
1103                expr.set(arg_key, value)
1104
1105                self._match(TokenType.COMMA)
1106
1107            return expr
1108
1109        def _parse_predict(self) -> exp.Predict:
1110            self._match_text_seq("MODEL")
1111            this = self._parse_table()
1112
1113            self._match(TokenType.COMMA)
1114            self._match_text_seq("TABLE")
1115
1116            return self.expression(
1117                exp.Predict,
1118                this=this,
1119                expression=self._parse_table(),
1120                params_struct=self._match(TokenType.COMMA) and self._parse_bitwise(),
1121            )
1122
1123        def _parse_generate_embedding(self) -> exp.GenerateEmbedding:
1124            self._match_text_seq("MODEL")
1125            this = self._parse_table()
1126
1127            self._match(TokenType.COMMA)
1128            self._match_text_seq("TABLE")
1129
1130            return self.expression(
1131                exp.GenerateEmbedding,
1132                this=this,
1133                expression=self._parse_table(),
1134                params_struct=self._match(TokenType.COMMA) and self._parse_bitwise(),
1135            )
1136
1137        def _parse_features_at_time(self) -> exp.FeaturesAtTime:
1138            self._match(TokenType.TABLE)
1139            this = self._parse_table()
1140
1141            expr = self.expression(exp.FeaturesAtTime, this=this)
1142
1143            while self._match(TokenType.COMMA):
1144                arg = self._parse_lambda()
1145
1146                # Get the LHS of the Kwarg and set the arg to that value, e.g
1147                # "num_rows => 1" sets the expr's `num_rows` arg
1148                if arg:
1149                    expr.set(arg.this.name, arg)
1150
1151            return expr
1152
1153        def _parse_vector_search(self) -> exp.VectorSearch:
1154            self._match(TokenType.TABLE)
1155            base_table = self._parse_table()
1156
1157            self._match(TokenType.COMMA)
1158
1159            column_to_search = self._parse_bitwise()
1160            self._match(TokenType.COMMA)
1161
1162            self._match(TokenType.TABLE)
1163            query_table = self._parse_table()
1164
1165            expr = self.expression(
1166                exp.VectorSearch,
1167                this=base_table,
1168                column_to_search=column_to_search,
1169                query_table=query_table,
1170            )
1171
1172            while self._match(TokenType.COMMA):
1173                # query_column_to_search can be named argument or positional
1174                if self._match(TokenType.STRING, advance=False):
1175                    query_column = self._parse_string()
1176                    expr.set("query_column_to_search", query_column)
1177                else:
1178                    arg = self._parse_lambda()
1179                    if arg:
1180                        expr.set(arg.this.name, arg)
1181
1182            return expr
1183
1184        def _parse_export_data(self) -> exp.Export:
1185            self._match_text_seq("DATA")
1186
1187            return self.expression(
1188                exp.Export,
1189                connection=self._match_text_seq("WITH", "CONNECTION") and self._parse_table_parts(),
1190                options=self._parse_properties(),
1191                this=self._match_text_seq("AS") and self._parse_select(),
1192            )
1193
1194    class Generator(generator.Generator):
1195        INTERVAL_ALLOWS_PLURAL_FORM = False
1196        JOIN_HINTS = False
1197        QUERY_HINTS = False
1198        TABLE_HINTS = False
1199        LIMIT_FETCH = "LIMIT"
1200        RENAME_TABLE_WITH_DB = False
1201        NVL2_SUPPORTED = False
1202        UNNEST_WITH_ORDINALITY = False
1203        COLLATE_IS_FUNC = True
1204        LIMIT_ONLY_LITERALS = True
1205        SUPPORTS_TABLE_ALIAS_COLUMNS = False
1206        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
1207        JSON_KEY_VALUE_PAIR_SEP = ","
1208        NULL_ORDERING_SUPPORTED = False
1209        IGNORE_NULLS_IN_FUNC = True
1210        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
1211        CAN_IMPLEMENT_ARRAY_ANY = True
1212        SUPPORTS_TO_NUMBER = False
1213        NAMED_PLACEHOLDER_TOKEN = "@"
1214        HEX_FUNC = "TO_HEX"
1215        WITH_PROPERTIES_PREFIX = "OPTIONS"
1216        SUPPORTS_EXPLODING_PROJECTIONS = False
1217        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
1218        SUPPORTS_UNIX_SECONDS = True
1219
1220        SAFE_JSON_PATH_KEY_RE = re.compile(r"^[_\-a-zA-Z][\-\w]*$")
1221
1222        TS_OR_DS_TYPES = (
1223            exp.TsOrDsToDatetime,
1224            exp.TsOrDsToTimestamp,
1225            exp.TsOrDsToTime,
1226            exp.TsOrDsToDate,
1227        )
1228
1229        TRANSFORMS = {
1230            **generator.Generator.TRANSFORMS,
1231            exp.ApproxTopK: rename_func("APPROX_TOP_COUNT"),
1232            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
1233            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
1234            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
1235            exp.Array: inline_array_unless_query,
1236            exp.ArrayContains: _array_contains_sql,
1237            exp.ArrayFilter: filter_array_using_unnest,
1238            exp.ArrayRemove: filter_array_using_unnest,
1239            exp.ByteLength: rename_func("BYTE_LENGTH"),
1240            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
1241            exp.CollateProperty: lambda self, e: (
1242                f"DEFAULT COLLATE {self.sql(e, 'this')}"
1243                if e.args.get("default")
1244                else f"COLLATE {self.sql(e, 'this')}"
1245            ),
1246            exp.Commit: lambda *_: "COMMIT TRANSACTION",
1247            exp.CountIf: rename_func("COUNTIF"),
1248            exp.Create: _create_sql,
1249            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
1250            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
1251            exp.DateDiff: lambda self, e: self.func(
1252                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
1253            ),
1254            exp.DateFromParts: rename_func("DATE"),
1255            exp.DateStrToDate: datestrtodate_sql,
1256            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
1257            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
1258            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
1259            exp.DateFromUnixDate: rename_func("DATE_FROM_UNIX_DATE"),
1260            exp.FromTimeZone: lambda self, e: self.func(
1261                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
1262            ),
1263            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
1264            exp.GroupConcat: lambda self, e: groupconcat_sql(
1265                self, e, func_name="STRING_AGG", within_group=False
1266            ),
1267            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
1268            exp.HexString: lambda self, e: self.hexstring_sql(e, binary_function_repr="FROM_HEX"),
1269            exp.If: if_sql(false_value="NULL"),
1270            exp.ILike: no_ilike_sql,
1271            exp.IntDiv: rename_func("DIV"),
1272            exp.Int64: rename_func("INT64"),
1273            exp.JSONBool: rename_func("BOOL"),
1274            exp.JSONExtract: _json_extract_sql,
1275            exp.JSONExtractArray: _json_extract_sql,
1276            exp.JSONExtractScalar: _json_extract_sql,
1277            exp.JSONFormat: rename_func("TO_JSON_STRING"),
1278            exp.JSONKeysAtDepth: rename_func("JSON_KEYS"),
1279            exp.JSONValueArray: rename_func("JSON_VALUE_ARRAY"),
1280            exp.Levenshtein: _levenshtein_sql,
1281            exp.Max: max_or_greatest,
1282            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
1283            exp.MD5Digest: rename_func("MD5"),
1284            exp.Min: min_or_least,
1285            exp.Normalize: lambda self, e: self.func(
1286                "NORMALIZE_AND_CASEFOLD" if e.args.get("is_casefold") else "NORMALIZE",
1287                e.this,
1288                e.args.get("form"),
1289            ),
1290            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
1291            exp.RegexpExtract: lambda self, e: self.func(
1292                "REGEXP_EXTRACT",
1293                e.this,
1294                e.expression,
1295                e.args.get("position"),
1296                e.args.get("occurrence"),
1297            ),
1298            exp.RegexpExtractAll: lambda self, e: self.func(
1299                "REGEXP_EXTRACT_ALL", e.this, e.expression
1300            ),
1301            exp.RegexpReplace: regexp_replace_sql,
1302            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
1303            exp.ReturnsProperty: _returnsproperty_sql,
1304            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
1305            exp.ParseTime: lambda self, e: self.func("PARSE_TIME", self.format_time(e), e.this),
1306            exp.ParseDatetime: lambda self, e: self.func(
1307                "PARSE_DATETIME", self.format_time(e), e.this
1308            ),
1309            exp.Select: transforms.preprocess(
1310                [
1311                    transforms.explode_projection_to_unnest(),
1312                    transforms.unqualify_unnest,
1313                    transforms.eliminate_distinct_on,
1314                    _alias_ordered_group,
1315                    transforms.eliminate_semi_and_anti_joins,
1316                ]
1317            ),
1318            exp.SHA: rename_func("SHA1"),
1319            exp.SHA2: sha256_sql,
1320            exp.StabilityProperty: lambda self, e: (
1321                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
1322            ),
1323            exp.String: rename_func("STRING"),
1324            exp.StrPosition: lambda self, e: (
1325                strposition_sql(
1326                    self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
1327                )
1328            ),
1329            exp.StrToDate: _str_to_datetime_sql,
1330            exp.StrToTime: _str_to_datetime_sql,
1331            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
1332            exp.TimeFromParts: rename_func("TIME"),
1333            exp.TimestampFromParts: rename_func("DATETIME"),
1334            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
1335            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
1336            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
1337            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
1338            exp.TimeStrToTime: timestrtotime_sql,
1339            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
1340            exp.TsOrDsAdd: _ts_or_ds_add_sql,
1341            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
1342            exp.TsOrDsToTime: rename_func("TIME"),
1343            exp.TsOrDsToDatetime: rename_func("DATETIME"),
1344            exp.TsOrDsToTimestamp: rename_func("TIMESTAMP"),
1345            exp.Unhex: rename_func("FROM_HEX"),
1346            exp.UnixDate: rename_func("UNIX_DATE"),
1347            exp.UnixToTime: _unix_to_time_sql,
1348            exp.Uuid: lambda *_: "GENERATE_UUID()",
1349            exp.Values: _derived_table_values_to_unnest,
1350            exp.VariancePop: rename_func("VAR_POP"),
1351            exp.SafeDivide: rename_func("SAFE_DIVIDE"),
1352        }
1353
        # JSON path components BigQuery can render natively; other parts fall back
        # to the base generator's unsupported-path handling.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Generic sqlglot types -> BigQuery type names. Note the timestamp split:
        # Type.TIMESTAMP renders as DATETIME while TIMESTAMPTZ/LTZ render as TIMESTAMP.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BLOB: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPNTZ: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.ROWVERSION: "BYTES",
            exp.DataType.Type.UUID: "STRING",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        # Where each DDL property may appear in a CREATE statement.
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # WINDOW comes after QUALIFY
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
        AFTER_HAVING_MODIFIER_TRANSFORMS = {
            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }
1500
1501        def datetrunc_sql(self, expression: exp.DateTrunc) -> str:
1502            unit = expression.unit
1503            unit_sql = unit.name if unit.is_string else self.sql(unit)
1504            return self.func("DATE_TRUNC", expression.this, unit_sql, expression.args.get("zone"))
1505
1506        def mod_sql(self, expression: exp.Mod) -> str:
1507            this = expression.this
1508            expr = expression.expression
1509            return self.func(
1510                "MOD",
1511                this.unnest() if isinstance(this, exp.Paren) else this,
1512                expr.unnest() if isinstance(expr, exp.Paren) else expr,
1513            )
1514
1515        def column_parts(self, expression: exp.Column) -> str:
1516            if expression.meta.get("quoted_column"):
1517                # If a column reference is of the form `dataset.table`.name, we need
1518                # to preserve the quoted table path, otherwise the reference breaks
1519                table_parts = ".".join(p.name for p in expression.parts[:-1])
1520                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
1521                return f"{table_path}.{self.sql(expression, 'this')}"
1522
1523            return super().column_parts(expression)
1524
1525        def table_parts(self, expression: exp.Table) -> str:
1526            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
1527            # we need to make sure the correct quoting is used in each case.
1528            #
1529            # For example, if there is a CTE x that clashes with a schema name, then the former will
1530            # return the table y in that schema, whereas the latter will return the CTE's y column:
1531            #
1532            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
1533            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
1534            if expression.meta.get("quoted_table"):
1535                table_parts = ".".join(p.name for p in expression.parts)
1536                return self.sql(exp.Identifier(this=table_parts, quoted=True))
1537
1538            return super().table_parts(expression)
1539
1540        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1541            this = expression.this
1542            if isinstance(this, exp.TsOrDsToDatetime):
1543                func_name = "FORMAT_DATETIME"
1544            elif isinstance(this, exp.TsOrDsToTimestamp):
1545                func_name = "FORMAT_TIMESTAMP"
1546            elif isinstance(this, exp.TsOrDsToTime):
1547                func_name = "FORMAT_TIME"
1548            else:
1549                func_name = "FORMAT_DATE"
1550
1551            time_expr = this if isinstance(this, self.TS_OR_DS_TYPES) else expression
1552            return self.func(
1553                func_name, self.format_time(expression), time_expr.this, expression.args.get("zone")
1554            )
1555
1556        def eq_sql(self, expression: exp.EQ) -> str:
1557            # Operands of = cannot be NULL in BigQuery
1558            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
1559                if not isinstance(expression.parent, exp.Update):
1560                    return "NULL"
1561
1562            return self.binary(expression, "=")
1563
1564        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
1565            parent = expression.parent
1566
1567            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
1568            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
1569            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
1570                return self.func(
1571                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
1572                )
1573
1574            return super().attimezone_sql(expression)
1575
        def trycast_sql(self, expression: exp.TryCast) -> str:
            # BigQuery spells TRY_CAST as SAFE_CAST; delegate with the SAFE_ prefix
            return self.cast_sql(expression, safe_prefix="SAFE_")
1578
        def bracket_sql(self, expression: exp.Bracket) -> str:
            """Render subscript access, mapping offsets to OFFSET/ORDINAL wrappers."""
            this = expression.this
            expressions = expression.expressions

            # Struct subscripts with a string key must use dot access instead
            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
                arg = expressions[0]
                if arg.type is None:
                    from sqlglot.optimizer.annotate_types import annotate_types

                    # Infer the subscript's type so we can tell strings from ints
                    arg = annotate_types(arg, dialect=self.dialect)

                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
                    # BQ doesn't support bracket syntax with string values for structs
                    return f"{self.sql(this)}.{arg.name}"

            expressions_sql = self.expressions(expression, flat=True)
            offset = expression.args.get("offset")

            # offset encodes index base: 0 -> OFFSET(..), 1 -> ORDINAL(..)
            if offset == 0:
                expressions_sql = f"OFFSET({expressions_sql})"
            elif offset == 1:
                expressions_sql = f"ORDINAL({expressions_sql})"
            elif offset is not None:
                self.unsupported(f"Unsupported array offset: {offset}")

            # "safe" adds the SAFE_ prefix (SAFE_OFFSET/SAFE_ORDINAL)
            if expression.args.get("safe"):
                expressions_sql = f"SAFE_{expressions_sql}"

            return f"{self.sql(this)}[{expressions_sql}]"
1608
        def in_unnest_op(self, expression: exp.Unnest) -> str:
            # BigQuery accepts `x IN UNNEST(...)` as-is; no extra wrapping required
            return self.sql(expression)
1611
        def version_sql(self, expression: exp.Version) -> str:
            # BigQuery's time-travel clause is FOR SYSTEM_TIME AS OF, so rewrite the
            # generic TIMESTAMP version kind in place before delegating
            if expression.name == "TIMESTAMP":
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
1616
1617        def contains_sql(self, expression: exp.Contains) -> str:
1618            this = expression.this
1619            expr = expression.expression
1620
1621            if isinstance(this, exp.Lower) and isinstance(expr, exp.Lower):
1622                this = this.this
1623                expr = expr.this
1624
1625            return self.func("CONTAINS_SUBSTR", this, expr, expression.args.get("json_scope"))
1626
1627        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1628            this = expression.this
1629
1630            # This ensures that inline type-annotated ARRAY literals like ARRAY<INT64>[1, 2, 3]
1631            # are roundtripped unaffected. The inner check excludes ARRAY(SELECT ...) expressions,
1632            # because they aren't literals and so the above syntax is invalid BigQuery.
1633            if isinstance(this, exp.Array):
1634                elem = seq_get(this.expressions, 0)
1635                if not (elem and elem.find(exp.Query)):
1636                    return f"{self.sql(expression, 'to')}{self.sql(this)}"
1637
1638            return super().cast_sql(expression, safe_prefix=safe_prefix)
1639
1640        def declareitem_sql(self, expression: exp.DeclareItem) -> str:
1641            variables = self.expressions(expression, "this")
1642            default = self.sql(expression, "default")
1643            default = f" DEFAULT {default}" if default else ""
1644            kind = self.sql(expression, "kind")
1645            kind = f" {kind}" if kind else ""
1646
1647            return f"{variables}{kind}{default}"
logger = <Logger sqlglot (WARNING)>
DQUOTES_ESCAPING_JSON_FUNCTIONS = ('JSON_QUERY', 'JSON_VALUE', 'JSON_QUERY_ARRAY')
class BigQuery(sqlglot.dialects.dialect.Dialect):
 441class BigQuery(Dialect):
    # --- Dialect feature flags; see the Dialect base class for each flag's meaning ---
    WEEK_OFFSET = -1
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    LOG_BASE_FIRST = False
    HEX_LOWERCASE = True
    FORCE_EARLY_ALIAS_REF_EXPANSION = True
    PRESERVE_ORIGINAL_NAMES = True
    HEX_STRING_IS_INTEGER_TYPE = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
    TIME_MAPPING = {
        "%D": "%m/%d/%y",
        "%E6S": "%S.%f",
        "%e": "%-d",
    }

    # Teradata/Oracle-style format elements -> strftime equivalents
    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    # https://cloud.google.com/bigquery/docs/querying-wildcard-tables#scanning_a_range_of_tables_using_table_suffix
    # https://cloud.google.com/bigquery/docs/query-cloud-storage-data#query_the_file_name_pseudo-column
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE", "_TABLE_SUFFIX", "_FILE_NAME"}

    # All set operations require either a DISTINCT or ALL specifier
    SET_OP_DISTINCT_BY_DEFAULT = dict.fromkeys((exp.Except, exp.Intersect, exp.Union), None)

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/navigation_functions#percentile_cont
    COERCES_TO = {
        **TypeAnnotator.COERCES_TO,
        exp.DataType.Type.BIGDECIMAL: {exp.DataType.Type.DOUBLE},
    }
    COERCES_TO[exp.DataType.Type.DECIMAL] |= {exp.DataType.Type.BIGDECIMAL}

    # BigQuery maps Type.TIMESTAMP to DATETIME, so we need to amend the inferred types
    TYPE_TO_EXPRESSIONS = {
        **Dialect.TYPE_TO_EXPRESSIONS,
        exp.DataType.Type.TIMESTAMPTZ: Dialect.TYPE_TO_EXPRESSIONS[exp.DataType.Type.TIMESTAMP],
    }
    TYPE_TO_EXPRESSIONS.pop(exp.DataType.Type.TIMESTAMP)
 503
    # Type-inference rules for BigQuery functions, layered over the dialect defaults.
    # _annotate_with_type pins a fixed result type; _annotate_by_args derives the
    # result type from the named argument(s), optionally wrapping it in an ARRAY.
    ANNOTATORS = {
        **Dialect.ANNOTATORS,
        **{
            expr_type: annotate_with_type_lambda(data_type)
            for data_type, expressions in TYPE_TO_EXPRESSIONS.items()
            for expr_type in expressions
        },
        **{
            expr_type: lambda self, e: _annotate_math_functions(self, e)
            for expr_type in (exp.Floor, exp.Ceil, exp.Log, exp.Ln, exp.Sqrt, exp.Exp, exp.Round)
        },
        **{
            # String functions whose result type follows their first argument
            expr_type: lambda self, e: self._annotate_by_args(e, "this")
            for expr_type in (
                exp.Left,
                exp.Right,
                exp.Lower,
                exp.Upper,
                exp.Pad,
                exp.Trim,
                exp.RegexpExtract,
                exp.RegexpReplace,
                exp.Repeat,
                exp.Substring,
            )
        },
        exp.ApproxTopSum: lambda self, e: _annotate_by_args_approx_top(self, e),
        exp.ApproxTopK: lambda self, e: _annotate_by_args_approx_top(self, e),
        exp.ApproxQuantiles: lambda self, e: self._annotate_by_args(e, "this", array=True),
        exp.ArgMax: lambda self, e: self._annotate_by_args(e, "this"),
        exp.ArgMin: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Array: _annotate_array,
        exp.ArrayConcat: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
        exp.Ascii: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseAndAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseOrAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseXorAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseCountAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.ByteLength: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.ByteString: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
        exp.CodePointsToBytes: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.BINARY
        ),
        exp.CodePointsToString: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.VARCHAR
        ),
        exp.Concat: _annotate_concat,
        exp.Contains: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BOOLEAN),
        exp.Corr: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.CovarPop: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.CovarSamp: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.CumeDist: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.DateFromUnixDate: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DATE),
        exp.DateTrunc: lambda self, e: self._annotate_by_args(e, "this"),
        exp.DenseRank: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.FarmFingerprint: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.FirstValue: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Unhex: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
        exp.Float64: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.Format: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
        exp.GenerateTimestampArray: lambda self, e: self._annotate_with_type(
            e, exp.DataType.build("ARRAY<TIMESTAMP>", dialect="bigquery")
        ),
        exp.Grouping: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.IgnoreNulls: lambda self, e: self._annotate_by_args(e, "this"),
        exp.JSONArray: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
        exp.JSONArrayAppend: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
        exp.JSONArrayInsert: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
        exp.JSONBool: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BOOLEAN),
        exp.JSONExtractScalar: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.VARCHAR
        ),
        exp.JSONExtract: lambda self, e: self._annotate_by_args(e, "this"),
        exp.JSONExtractArray: lambda self, e: self._annotate_by_args(e, "this", array=True),
        exp.JSONFormat: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
        exp.JSONKeysAtDepth: lambda self, e: self._annotate_with_type(
            e, exp.DataType.build("ARRAY<VARCHAR>", dialect="bigquery")
        ),
        exp.JSONObject: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
        exp.JSONRemove: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
        exp.JSONSet: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
        exp.JSONStripNulls: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
        exp.JSONType: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
        exp.JSONValueArray: lambda self, e: self._annotate_with_type(
            e, exp.DataType.build("ARRAY<VARCHAR>", dialect="bigquery")
        ),
        exp.Lag: lambda self, e: self._annotate_by_args(e, "this", "default"),
        exp.Lead: lambda self, e: self._annotate_by_args(e, "this"),
        exp.LowerHex: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
        exp.LaxBool: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BOOLEAN),
        exp.LaxFloat64: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.LaxInt64: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.LaxString: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
        exp.MD5Digest: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
        exp.Normalize: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
        exp.NthValue: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Ntile: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.ParseTime: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.TIME),
        exp.ParseDatetime: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DATETIME),
        exp.ParseBignumeric: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.BIGDECIMAL
        ),
        exp.ParseNumeric: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DECIMAL),
        # NOTE(review): helper name carries a "perncentile" typo; it matches the
        # helper's definition elsewhere in this file, so renaming must be done in both places
        exp.PercentileCont: lambda self, e: _annotate_perncentile_cont(self, e),
        exp.PercentileDisc: lambda self, e: self._annotate_by_args(e, "this"),
        exp.PercentRank: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.Rank: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.RegexpExtractAll: lambda self, e: self._annotate_by_args(e, "this", array=True),
        exp.RegexpInstr: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.Replace: lambda self, e: self._annotate_by_args(e, "this"),
        exp.RespectNulls: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Reverse: lambda self, e: self._annotate_by_args(e, "this"),
        exp.RowNumber: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.SafeConvertBytesToString: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.VARCHAR
        ),
        exp.Soundex: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
        exp.SHA: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
        exp.SHA2: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
        exp.Sign: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Split: lambda self, e: self._annotate_by_args(e, "this", array=True),
        exp.TimestampFromParts: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.DATETIME
        ),
        exp.TimestampTrunc: lambda self, e: self._annotate_by_args(e, "this"),
        exp.TimeFromParts: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.TIME),
        exp.TimeTrunc: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.TIME),
        exp.ToCodePoints: lambda self, e: self._annotate_with_type(
            e, exp.DataType.build("ARRAY<BIGINT>", dialect="bigquery")
        ),
        exp.TsOrDsToTime: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.TIME),
        exp.Translate: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Unicode: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
    }
 638
 639    def normalize_identifier(self, expression: E) -> E:
 640        if (
 641            isinstance(expression, exp.Identifier)
 642            and self.normalization_strategy is NormalizationStrategy.CASE_INSENSITIVE
 643        ):
 644            parent = expression.parent
 645            while isinstance(parent, exp.Dot):
 646                parent = parent.parent
 647
 648            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
 649            # by default. The following check uses a heuristic to detect tables based on whether
 650            # they are qualified. This should generally be correct, because tables in BigQuery
 651            # must be qualified with at least a dataset, unless @@dataset_id is set.
 652            case_sensitive = (
 653                isinstance(parent, exp.UserDefinedFunction)
 654                or (
 655                    isinstance(parent, exp.Table)
 656                    and parent.db
 657                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
 658                )
 659                or expression.meta.get("is_table")
 660            )
 661            if not case_sensitive:
 662                expression.set("this", expression.this.lower())
 663
 664            return t.cast(E, expression)
 665
 666        return super().normalize_identifier(expression)
 667
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # Tokens accepted as bare names inside JSON paths; DASH is included so
        # unquoted keys containing dashes tokenize as a single name
        VAR_TOKENS = {
            TokenType.DASH,
            TokenType.VAR,
        }
 673
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer configured for BigQuery's lexical rules."""

        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B"..." byte literals for every supported quote style
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R"..." raw strings for every supported quote style
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BYTEINT": TokenType.INT,
            "BYTES": TokenType.BINARY,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "DATETIME": TokenType.TIMESTAMP,
            "DECLARE": TokenType.DECLARE,
            "ELSEIF": TokenType.COMMAND,
            "EXCEPTION": TokenType.COMMAND,
            "EXPORT": TokenType.EXPORT,
            "FLOAT64": TokenType.DOUBLE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "LOOP": TokenType.COMMAND,
            "MODEL": TokenType.MODEL,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "RECORD": TokenType.STRUCT,
            "REPEAT": TokenType.COMMAND,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "WHILE": TokenType.COMMAND,
        }
        # DIV is parsed as a function here (see Parser.FUNCTIONS), and VALUES and
        # the /*+ hint opener are not treated as keywords in BigQuery
        KEYWORDS.pop("DIV")
        KEYWORDS.pop("VALUES")
        KEYWORDS.pop("/*+")
 718
 719    class Parser(parser.Parser):
        # Parser feature flags; see parser.Parser for each flag's meaning
        PREFIXED_PIVOT_COLUMNS = True
        LOG_DEFAULTS_TO_LN = True
        SUPPORTS_IMPLICIT_UNNEST = True
        JOINS_HAVE_EQUAL_PRECEDENCE = True

        # BigQuery does not allow ASC/DESC to be used as an identifier
        ID_VAR_TOKENS = parser.Parser.ID_VAR_TOKENS - {TokenType.ASC, TokenType.DESC}
        ALIAS_TOKENS = parser.Parser.ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        COMMENT_TABLE_ALIAS_TOKENS = parser.Parser.COMMENT_TABLE_ALIAS_TOKENS - {
            TokenType.ASC,
            TokenType.DESC,
        }
        UPDATE_ALIAS_TOKENS = parser.Parser.UPDATE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
 734
        # Function-name -> AST-builder overrides for BigQuery-specific functions,
        # layered over the base parser's defaults
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_TOP_COUNT": exp.ApproxTopK.from_arg_list,
            "BOOL": exp.JSONBool.from_arg_list,
            "CONTAINS_SUBSTR": _build_contains_substring,
            "DATE": _build_date,
            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=seq_get(args, 1),
                this=seq_get(args, 0),
                zone=seq_get(args, 2),
            ),
            "DATETIME": _build_datetime,
            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "EDIT_DISTANCE": _build_levenshtein,
            "FORMAT_DATE": _build_format_time(exp.TsOrDsToDate),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "JSON_EXTRACT_SCALAR": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_EXTRACT_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_EXTRACT_STRING_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            "JSON_KEYS": exp.JSONKeysAtDepth.from_arg_list,
            "JSON_QUERY": parser.build_extract_json_with_path(exp.JSONExtract),
            "JSON_QUERY_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_STRIP_NULLS": _build_json_strip_nulls,
            "JSON_VALUE": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_VALUE_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "MD5": exp.MD5Digest.from_arg_list,
            "NORMALIZE_AND_CASEFOLD": lambda args: exp.Normalize(
                this=seq_get(args, 0), form=seq_get(args, 1), is_casefold=True
            ),
            "OCTET_LENGTH": exp.ByteLength.from_arg_list,
            "TO_HEX": _build_to_hex,
            # The PARSE_* functions take (format, value); sqlglot stores (value, format),
            # hence the argument swap below
            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIME": lambda args: build_formatted_time(exp.ParseTime, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _build_parse_timestamp,
            "PARSE_DATETIME": lambda args: build_formatted_time(exp.ParseDatetime, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(
                exp.RegexpExtractAll, default_group=exp.Literal.number(0)
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "STRPOS": exp.StrPosition.from_arg_list,
            "TIME": _build_time,
            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP": _build_timestamp,
            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
            ),
            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
            "FORMAT_DATETIME": _build_format_time(exp.TsOrDsToDatetime),
            "FORMAT_TIMESTAMP": _build_format_time(exp.TsOrDsToTimestamp),
            "FORMAT_TIME": _build_format_time(exp.TsOrDsToTime),
            "FROM_HEX": exp.Unhex.from_arg_list,
            "WEEK": lambda args: exp.WeekStart(this=exp.var(seq_get(args, 0))),
        }
 815
        # Function calls whose argument lists need custom parsing beyond the
        # generic FUNC(arg, ...) machinery (statements, mixed named/positional args).
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
            "JSON_ARRAY": lambda self: self.expression(
                exp.JSONArray, expressions=self._parse_csv(self._parse_bitwise)
            ),
            "MAKE_INTERVAL": lambda self: self._parse_make_interval(),
            "PREDICT": lambda self: self._parse_predict(),
            "FEATURES_AT_TIME": lambda self: self._parse_features_at_time(),
            "GENERATE_EMBEDDING": lambda self: self._parse_generate_embedding(),
            "VECTOR_SEARCH": lambda self: self._parse_vector_search(),
        }
        # Without a dedicated parser, TRIM is parsed as a regular function call
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        # Additionally treat TABLE as a token that can introduce a nested type
        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        # OVERLAPS is not a range operator in BigQuery
        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        # Tokens that terminate a dash-separated table part (see _parse_table_part)
        DASHED_TABLE_PART_FOLLOW_TOKENS = {TokenType.DOT, TokenType.L_PAREN, TokenType.R_PAREN}

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
            TokenType.END: lambda self: self._parse_as_command(self._prev),
            TokenType.FOR: lambda self: self._parse_for_in(),
            TokenType.EXPORT: lambda self: self._parse_export_data(),
            TokenType.DECLARE: lambda self: self._parse_declare(),
        }

        # Array accessor keyword -> (index offset, is_safe); consumed by _parse_bracket
        BRACKET_OFFSETS = {
            "OFFSET": (0, False),
            "ORDINAL": (1, False),
            "SAFE_OFFSET": (0, True),
            "SAFE_ORDINAL": (1, True),
        }
 875
 876        def _parse_for_in(self) -> t.Union[exp.ForIn, exp.Command]:
 877            index = self._index
 878            this = self._parse_range()
 879            self._match_text_seq("DO")
 880            if self._match(TokenType.COMMAND):
 881                self._retreat(index)
 882                return self._parse_as_command(self._prev)
 883            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())
 884
        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one part of a table name, gluing dash-separated and numeric pieces.

            BigQuery allows dashes in unquoted table/project names (e.g. proj-123.db.tbl),
            so consecutive tokens joined by dashes are merged back into one Identifier.
            """
            # A part may start as a plain number (e.g. a dataset named `123abc`)
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Keep consuming tokens while the next token is a DASH physically
                # adjacent to the current name, stopping at `.`, `(` or `)`
                while self._match(TokenType.DASH, advance=False) and self._next:
                    start = self._curr
                    while self._is_connected() and not self._match_set(
                        self.DASHED_TABLE_PART_FOLLOW_TOKENS, advance=False
                    ):
                        self._advance()

                    # No progress means nothing more to glue; avoid an infinite loop
                    if start == self._curr:
                        break

                    # Append the raw SQL text between the dash and the stop token
                    table_name += self._find_sql(start, self._prev)

                this = exp.Identifier(
                    this=table_name, quoted=this.args.get("quoted")
                ).update_positions(this)
            elif isinstance(this, exp.Literal):
                # e.g. `proj-123abc`: the tokenizer produced a literal; if a var
                # immediately follows (no whitespace), merge it into the name
                table_name = this.name

                if self._is_connected() and self._parse_var(any_token=True):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True).update_positions(this)

            return this
 915
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a (possibly quoted/dashed/dotted) table reference into an exp.Table.

            Handles three BigQuery quirks: float-tokenized parts (proj-1.db.tbl),
            fully-quoted multi-part names (`proj.db.tbl`), and INFORMATION_SCHEMA views.
            """
            # Wildcard tables (e.g. `tbl_*`) are always allowed in BigQuery
            table = super()._parse_table_parts(
                schema=schema, is_db_reference=is_db_reference, wildcard=True
            )

            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
            if not table.catalog:
                if table.db:
                    previous_db = table.args["db"]
                    parts = table.db.split(".")
                    if len(parts) == 2 and not table.args["db"].quoted:
                        table.set(
                            "catalog", exp.Identifier(this=parts[0]).update_positions(previous_db)
                        )
                        table.set("db", exp.Identifier(this=parts[1]).update_positions(previous_db))
                else:
                    previous_this = table.this
                    parts = table.name.split(".")
                    if len(parts) == 2 and not table.this.quoted:
                        table.set(
                            "db", exp.Identifier(this=parts[0]).update_positions(previous_this)
                        )
                        table.set(
                            "this", exp.Identifier(this=parts[1]).update_positions(previous_this)
                        )

            # A single quoted identifier containing dots (e.g. `proj.db.tbl`) is split
            # back into catalog/db/this, marking the table so the generator re-quotes it
            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
                alias = table.this
                catalog, db, this, *rest = (
                    exp.to_identifier(p, quoted=True)
                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
                )

                for part in (catalog, db, this):
                    if part:
                        part.update_positions(table.this)

                if rest and this:
                    this = exp.Dot.build([this, *rest])  # type: ignore

                table = exp.Table(
                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
                )
                table.meta["quoted_table"] = True
            else:
                alias = None

            # The `INFORMATION_SCHEMA` views in BigQuery need to be qualified by a region or
            # dataset, so if the project identifier is omitted we need to fix the ast so that
            # the `INFORMATION_SCHEMA.X` bit is represented as a single (quoted) Identifier.
            # Otherwise, we wouldn't correctly qualify a `Table` node that references these
            # views, because it would seem like the "catalog" part is set, when it'd actually
            # be the region/dataset. Merging the two identifiers into a single one is done to
            # avoid producing a 4-part Table reference, which would cause issues in the schema
            # module, when there are 3-part table names mixed with information schema views.
            #
            # See: https://cloud.google.com/bigquery/docs/information-schema-intro#syntax
            table_parts = table.parts
            if len(table_parts) > 1 and table_parts[-2].name.upper() == "INFORMATION_SCHEMA":
                # We need to alias the table here to avoid breaking existing qualified columns.
                # This is expected to be safe, because if there's an actual alias coming up in
                # the token stream, it will overwrite this one. If there isn't one, we are only
                # exposing the name that can be used to reference the view explicitly (a no-op).
                exp.alias_(
                    table,
                    t.cast(exp.Identifier, alias or table_parts[-1]),
                    table=True,
                    copy=False,
                )

                info_schema_view = f"{table_parts[-2].name}.{table_parts[-1].name}"
                new_this = exp.Identifier(this=info_schema_view, quoted=True).update_positions(
                    line=table_parts[-2].meta.get("line"),
                    col=table_parts[-1].meta.get("col"),
                    start=table_parts[-2].meta.get("start"),
                    end=table_parts[-1].meta.get("end"),
                )
                table.set("this", new_this)
                table.set("db", seq_get(table_parts, -3))
                table.set("catalog", seq_get(table_parts, -4))

            return table
1000
1001        def _parse_column(self) -> t.Optional[exp.Expression]:
1002            column = super()._parse_column()
1003            if isinstance(column, exp.Column):
1004                parts = column.parts
1005                if any("." in p.name for p in parts):
1006                    catalog, db, table, this, *rest = (
1007                        exp.to_identifier(p, quoted=True)
1008                        for p in split_num_words(".".join(p.name for p in parts), ".", 4)
1009                    )
1010
1011                    if rest and this:
1012                        this = exp.Dot.build([this, *rest])  # type: ignore
1013
1014                    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
1015                    column.meta["quoted_column"] = True
1016
1017            return column
1018
1019        @t.overload
1020        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...
1021
1022        @t.overload
1023        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...
1024
1025        def _parse_json_object(self, agg=False):
1026            json_object = super()._parse_json_object()
1027            array_kv_pair = seq_get(json_object.expressions, 0)
1028
1029            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
1030            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
1031            if (
1032                array_kv_pair
1033                and isinstance(array_kv_pair.this, exp.Array)
1034                and isinstance(array_kv_pair.expression, exp.Array)
1035            ):
1036                keys = array_kv_pair.this.expressions
1037                values = array_kv_pair.expression.expressions
1038
1039                json_object.set(
1040                    "expressions",
1041                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
1042                )
1043
1044            return json_object
1045
1046        def _parse_bracket(
1047            self, this: t.Optional[exp.Expression] = None
1048        ) -> t.Optional[exp.Expression]:
1049            bracket = super()._parse_bracket(this)
1050
1051            if this is bracket:
1052                return bracket
1053
1054            if isinstance(bracket, exp.Bracket):
1055                for expression in bracket.expressions:
1056                    name = expression.name.upper()
1057
1058                    if name not in self.BRACKET_OFFSETS:
1059                        break
1060
1061                    offset, safe = self.BRACKET_OFFSETS[name]
1062                    bracket.set("offset", offset)
1063                    bracket.set("safe", safe)
1064                    expression.replace(expression.expressions[0])
1065
1066            return bracket
1067
1068        def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
1069            unnest = super()._parse_unnest(with_alias=with_alias)
1070
1071            if not unnest:
1072                return None
1073
1074            unnest_expr = seq_get(unnest.expressions, 0)
1075            if unnest_expr:
1076                from sqlglot.optimizer.annotate_types import annotate_types
1077
1078                unnest_expr = annotate_types(unnest_expr, dialect=self.dialect)
1079
1080                # Unnesting a nested array (i.e array of structs) explodes the top-level struct fields,
1081                # in contrast to other dialects such as DuckDB which flattens only the array by default
1082                if unnest_expr.is_type(exp.DataType.Type.ARRAY) and any(
1083                    array_elem.is_type(exp.DataType.Type.STRUCT)
1084                    for array_elem in unnest_expr._type.expressions
1085                ):
1086                    unnest.set("explode_array", True)
1087
1088            return unnest
1089
1090        def _parse_make_interval(self) -> exp.MakeInterval:
1091            expr = exp.MakeInterval()
1092
1093            for arg_key in expr.arg_types:
1094                value = self._parse_lambda()
1095
1096                if not value:
1097                    break
1098
1099                # Non-named arguments are filled sequentially, (optionally) followed by named arguments
1100                # that can appear in any order e.g MAKE_INTERVAL(1, minute => 5, day => 2)
1101                if isinstance(value, exp.Kwarg):
1102                    arg_key = value.this.name
1103
1104                expr.set(arg_key, value)
1105
1106                self._match(TokenType.COMMA)
1107
1108            return expr
1109
1110        def _parse_predict(self) -> exp.Predict:
1111            self._match_text_seq("MODEL")
1112            this = self._parse_table()
1113
1114            self._match(TokenType.COMMA)
1115            self._match_text_seq("TABLE")
1116
1117            return self.expression(
1118                exp.Predict,
1119                this=this,
1120                expression=self._parse_table(),
1121                params_struct=self._match(TokenType.COMMA) and self._parse_bitwise(),
1122            )
1123
1124        def _parse_generate_embedding(self) -> exp.GenerateEmbedding:
1125            self._match_text_seq("MODEL")
1126            this = self._parse_table()
1127
1128            self._match(TokenType.COMMA)
1129            self._match_text_seq("TABLE")
1130
1131            return self.expression(
1132                exp.GenerateEmbedding,
1133                this=this,
1134                expression=self._parse_table(),
1135                params_struct=self._match(TokenType.COMMA) and self._parse_bitwise(),
1136            )
1137
1138        def _parse_features_at_time(self) -> exp.FeaturesAtTime:
1139            self._match(TokenType.TABLE)
1140            this = self._parse_table()
1141
1142            expr = self.expression(exp.FeaturesAtTime, this=this)
1143
1144            while self._match(TokenType.COMMA):
1145                arg = self._parse_lambda()
1146
1147                # Get the LHS of the Kwarg and set the arg to that value, e.g
1148                # "num_rows => 1" sets the expr's `num_rows` arg
1149                if arg:
1150                    expr.set(arg.this.name, arg)
1151
1152            return expr
1153
1154        def _parse_vector_search(self) -> exp.VectorSearch:
1155            self._match(TokenType.TABLE)
1156            base_table = self._parse_table()
1157
1158            self._match(TokenType.COMMA)
1159
1160            column_to_search = self._parse_bitwise()
1161            self._match(TokenType.COMMA)
1162
1163            self._match(TokenType.TABLE)
1164            query_table = self._parse_table()
1165
1166            expr = self.expression(
1167                exp.VectorSearch,
1168                this=base_table,
1169                column_to_search=column_to_search,
1170                query_table=query_table,
1171            )
1172
1173            while self._match(TokenType.COMMA):
1174                # query_column_to_search can be named argument or positional
1175                if self._match(TokenType.STRING, advance=False):
1176                    query_column = self._parse_string()
1177                    expr.set("query_column_to_search", query_column)
1178                else:
1179                    arg = self._parse_lambda()
1180                    if arg:
1181                        expr.set(arg.this.name, arg)
1182
1183            return expr
1184
1185        def _parse_export_data(self) -> exp.Export:
1186            self._match_text_seq("DATA")
1187
1188            return self.expression(
1189                exp.Export,
1190                connection=self._match_text_seq("WITH", "CONNECTION") and self._parse_table_parts(),
1191                options=self._parse_properties(),
1192                this=self._match_text_seq("AS") and self._parse_select(),
1193            )
1194
    class Generator(generator.Generator):
        # Behavior toggles overriding generator.Generator defaults (see the base
        # class for each flag's semantics)
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        NVL2_SUPPORTED = False
        UNNEST_WITH_ORDINALITY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        SUPPORTS_TABLE_ALIAS_COLUMNS = False
        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
        JSON_KEY_VALUE_PAIR_SEP = ","
        NULL_ORDERING_SUPPORTED = False
        IGNORE_NULLS_IN_FUNC = True
        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
        CAN_IMPLEMENT_ARRAY_ANY = True
        SUPPORTS_TO_NUMBER = False
        # BigQuery named query parameters are prefixed with "@"
        NAMED_PLACEHOLDER_TOKEN = "@"
        HEX_FUNC = "TO_HEX"
        WITH_PROPERTIES_PREFIX = "OPTIONS"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_UNIX_SECONDS = True

        # NOTE(review): presumably JSON path keys matching this pattern can be
        # emitted without quoting — the consuming code is outside this chunk
        SAFE_JSON_PATH_KEY_RE = re.compile(r"^[_\-a-zA-Z][\-\w]*$")

        # TsOrDs* conversion expressions grouped for shared handling (used by
        # time-formatting generation such as timetostr_sql)
        TS_OR_DS_TYPES = (
            exp.TsOrDsToDatetime,
            exp.TsOrDsToTimestamp,
            exp.TsOrDsToTime,
            exp.TsOrDsToDate,
        )
1229
        # Expression-type -> SQL rendering overrides used when generating BigQuery
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxTopK: rename_func("APPROX_TOP_COUNT"),
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
            exp.Array: inline_array_unless_query,
            exp.ArrayContains: _array_contains_sql,
            exp.ArrayFilter: filter_array_using_unnest,
            exp.ArrayRemove: filter_array_using_unnest,
            exp.ByteLength: rename_func("BYTE_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.CollateProperty: lambda self, e: (
                f"DEFAULT COLLATE {self.sql(e, 'this')}"
                if e.args.get("default")
                else f"COLLATE {self.sql(e, 'this')}"
            ),
            exp.Commit: lambda *_: "COMMIT TRANSACTION",
            exp.CountIf: rename_func("COUNTIF"),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: self.func(
                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
            ),
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
            exp.DateFromUnixDate: rename_func("DATE_FROM_UNIX_DATE"),
            exp.FromTimeZone: lambda self, e: self.func(
                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
            ),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: lambda self, e: groupconcat_sql(
                self, e, func_name="STRING_AGG", within_group=False
            ),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.HexString: lambda self, e: self.hexstring_sql(e, binary_function_repr="FROM_HEX"),
            exp.If: if_sql(false_value="NULL"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.Int64: rename_func("INT64"),
            exp.JSONBool: rename_func("BOOL"),
            exp.JSONExtract: _json_extract_sql,
            exp.JSONExtractArray: _json_extract_sql,
            exp.JSONExtractScalar: _json_extract_sql,
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.JSONKeysAtDepth: rename_func("JSON_KEYS"),
            exp.JSONValueArray: rename_func("JSON_VALUE_ARRAY"),
            exp.Levenshtein: _levenshtein_sql,
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.Normalize: lambda self, e: self.func(
                "NORMALIZE_AND_CASEFOLD" if e.args.get("is_casefold") else "NORMALIZE",
                e.this,
                e.args.get("form"),
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpExtractAll: lambda self, e: self.func(
                "REGEXP_EXTRACT_ALL", e.this, e.expression
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
            exp.ParseTime: lambda self, e: self.func("PARSE_TIME", self.format_time(e), e.this),
            exp.ParseDatetime: lambda self, e: self.func(
                "PARSE_DATETIME", self.format_time(e), e.this
            ),
            # AST rewrites applied to every SELECT before rendering
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_projection_to_unnest(),
                    transforms.unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.SHA2: sha256_sql,
            exp.StabilityProperty: lambda self, e: (
                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
            ),
            exp.String: rename_func("STRING"),
            exp.StrPosition: lambda self, e: (
                strposition_sql(
                    self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
                )
            ),
            exp.StrToDate: _str_to_datetime_sql,
            exp.StrToTime: _str_to_datetime_sql,
            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
            exp.TimeFromParts: rename_func("TIME"),
            exp.TimestampFromParts: rename_func("DATETIME"),
            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
            exp.TsOrDsAdd: _ts_or_ds_add_sql,
            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
            exp.TsOrDsToTime: rename_func("TIME"),
            exp.TsOrDsToDatetime: rename_func("DATETIME"),
            exp.TsOrDsToTimestamp: rename_func("TIMESTAMP"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.UnixDate: rename_func("UNIX_DATE"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.Uuid: lambda *_: "GENERATE_UUID()",
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
            exp.SafeDivide: rename_func("SAFE_DIVIDE"),
        }
1354
        # JSON path components that can be rendered in BigQuery JSON paths
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Canonical sqlglot data types -> BigQuery type names
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BLOB: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPNTZ: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.ROWVERSION: "BYTES",
            exp.DataType.Type.UUID: "STRING",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            # VOLATILE has no BigQuery equivalent (NOT DETERMINISTIC is handled
            # via StabilityProperty in TRANSFORMS)
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # WINDOW comes after QUALIFY
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
        AFTER_HAVING_MODIFIER_TRANSFORMS = {
            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
        }
1401
        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        # Identifiers that collide with these require quoting when generated
        RESERVED_KEYWORDS = {
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }
1501
1502        def datetrunc_sql(self, expression: exp.DateTrunc) -> str:
1503            unit = expression.unit
1504            unit_sql = unit.name if unit.is_string else self.sql(unit)
1505            return self.func("DATE_TRUNC", expression.this, unit_sql, expression.args.get("zone"))
1506
1507        def mod_sql(self, expression: exp.Mod) -> str:
1508            this = expression.this
1509            expr = expression.expression
1510            return self.func(
1511                "MOD",
1512                this.unnest() if isinstance(this, exp.Paren) else this,
1513                expr.unnest() if isinstance(expr, exp.Paren) else expr,
1514            )
1515
1516        def column_parts(self, expression: exp.Column) -> str:
1517            if expression.meta.get("quoted_column"):
1518                # If a column reference is of the form `dataset.table`.name, we need
1519                # to preserve the quoted table path, otherwise the reference breaks
1520                table_parts = ".".join(p.name for p in expression.parts[:-1])
1521                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
1522                return f"{table_path}.{self.sql(expression, 'this')}"
1523
1524            return super().column_parts(expression)
1525
1526        def table_parts(self, expression: exp.Table) -> str:
1527            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
1528            # we need to make sure the correct quoting is used in each case.
1529            #
1530            # For example, if there is a CTE x that clashes with a schema name, then the former will
1531            # return the table y in that schema, whereas the latter will return the CTE's y column:
1532            #
1533            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
1534            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
1535            if expression.meta.get("quoted_table"):
1536                table_parts = ".".join(p.name for p in expression.parts)
1537                return self.sql(exp.Identifier(this=table_parts, quoted=True))
1538
1539            return super().table_parts(expression)
1540
1541        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1542            this = expression.this
1543            if isinstance(this, exp.TsOrDsToDatetime):
1544                func_name = "FORMAT_DATETIME"
1545            elif isinstance(this, exp.TsOrDsToTimestamp):
1546                func_name = "FORMAT_TIMESTAMP"
1547            elif isinstance(this, exp.TsOrDsToTime):
1548                func_name = "FORMAT_TIME"
1549            else:
1550                func_name = "FORMAT_DATE"
1551
1552            time_expr = this if isinstance(this, self.TS_OR_DS_TYPES) else expression
1553            return self.func(
1554                func_name, self.format_time(expression), time_expr.this, expression.args.get("zone")
1555            )
1556
1557        def eq_sql(self, expression: exp.EQ) -> str:
1558            # Operands of = cannot be NULL in BigQuery
1559            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
1560                if not isinstance(expression.parent, exp.Update):
1561                    return "NULL"
1562
1563            return self.binary(expression, "=")
1564
1565        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
1566            parent = expression.parent
1567
1568            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
1569            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
1570            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
1571                return self.func(
1572                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
1573                )
1574
1575            return super().attimezone_sql(expression)
1576
1577        def trycast_sql(self, expression: exp.TryCast) -> str:
1578            return self.cast_sql(expression, safe_prefix="SAFE_")
1579
1580        def bracket_sql(self, expression: exp.Bracket) -> str:
1581            this = expression.this
1582            expressions = expression.expressions
1583
1584            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
1585                arg = expressions[0]
1586                if arg.type is None:
1587                    from sqlglot.optimizer.annotate_types import annotate_types
1588
1589                    arg = annotate_types(arg, dialect=self.dialect)
1590
1591                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
1592                    # BQ doesn't support bracket syntax with string values for structs
1593                    return f"{self.sql(this)}.{arg.name}"
1594
1595            expressions_sql = self.expressions(expression, flat=True)
1596            offset = expression.args.get("offset")
1597
1598            if offset == 0:
1599                expressions_sql = f"OFFSET({expressions_sql})"
1600            elif offset == 1:
1601                expressions_sql = f"ORDINAL({expressions_sql})"
1602            elif offset is not None:
1603                self.unsupported(f"Unsupported array offset: {offset}")
1604
1605            if expression.args.get("safe"):
1606                expressions_sql = f"SAFE_{expressions_sql}"
1607
1608            return f"{self.sql(this)}[{expressions_sql}]"
1609
1610        def in_unnest_op(self, expression: exp.Unnest) -> str:
1611            return self.sql(expression)
1612
1613        def version_sql(self, expression: exp.Version) -> str:
1614            if expression.name == "TIMESTAMP":
1615                expression.set("this", "SYSTEM_TIME")
1616            return super().version_sql(expression)
1617
1618        def contains_sql(self, expression: exp.Contains) -> str:
1619            this = expression.this
1620            expr = expression.expression
1621
1622            if isinstance(this, exp.Lower) and isinstance(expr, exp.Lower):
1623                this = this.this
1624                expr = expr.this
1625
1626            return self.func("CONTAINS_SUBSTR", this, expr, expression.args.get("json_scope"))
1627
1628        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1629            this = expression.this
1630
1631            # This ensures that inline type-annotated ARRAY literals like ARRAY<INT64>[1, 2, 3]
1632            # are roundtripped unaffected. The inner check excludes ARRAY(SELECT ...) expressions,
1633            # because they aren't literals and so the above syntax is invalid BigQuery.
1634            if isinstance(this, exp.Array):
1635                elem = seq_get(this.expressions, 0)
1636                if not (elem and elem.find(exp.Query)):
1637                    return f"{self.sql(expression, 'to')}{self.sql(this)}"
1638
1639            return super().cast_sql(expression, safe_prefix=safe_prefix)
1640
1641        def declareitem_sql(self, expression: exp.DeclareItem) -> str:
1642            variables = self.expressions(expression, "this")
1643            default = self.sql(expression, "default")
1644            default = f" DEFAULT {default}" if default else ""
1645            kind = self.sql(expression, "kind")
1646            kind = f" {kind}" if kind else ""
1647
1648            return f"{variables}{kind}{default}"
WEEK_OFFSET = -1

First day of the week in DATE_TRUNC(week). Defaults to 0 (Monday). -1 would be Sunday.

UNNEST_COLUMN_ONLY = True

Whether UNNEST table aliases are treated as column aliases.

SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

LOG_BASE_FIRST: Optional[bool] = False

Whether the base comes first in the LOG function. Possible values: True, False, None (two arguments are not supported by LOG)

HEX_LOWERCASE = True

Whether the HEX function returns a lowercase hexadecimal string.

FORCE_EARLY_ALIAS_REF_EXPANSION = True

Whether alias reference expansion (_expand_alias_refs()) should run before column qualification (_qualify_columns()).

For example:

WITH data AS ( SELECT 1 AS id, 2 AS my_id ) SELECT id AS my_id FROM data WHERE my_id = 1 GROUP BY my_id HAVING my_id = 1

In most dialects, "my_id" would refer to "data.my_id" across the query, with two exceptions: BigQuery, which forwards the alias into the GROUP BY and HAVING clauses, i.e. it resolves to "WHERE my_id = 1 GROUP BY id HAVING id = 1"; and ClickHouse, which forwards the alias across the whole query, i.e. it resolves to "WHERE id = 1 GROUP BY id HAVING id = 1".

PRESERVE_ORIGINAL_NAMES: bool = True

Whether the name of the function should be preserved inside the node's metadata, can be useful for roundtripping deprecated vs new functions that share an AST node e.g JSON_VALUE vs JSON_EXTRACT_SCALAR in BigQuery

HEX_STRING_IS_INTEGER_TYPE: bool = True

Whether hex strings such as x'CC' evaluate to integer or binary/blob type

NORMALIZATION_STRATEGY = <NormalizationStrategy.CASE_INSENSITIVE: 'CASE_INSENSITIVE'>

Specifies the strategy according to which identifiers should be normalized.

NORMALIZE_FUNCTIONS: bool | str = False

Determines how function names are going to be normalized.

Possible values:

"upper" or True: Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.

TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y', '%E6S': '%S.%f', '%e': '%-d'}

Associates this dialect's time formats with their equivalent Python strftime formats.

FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}

Helper which is used for parsing the special syntax CAST(x AS DATE FORMAT 'yyyy'). If empty, the corresponding trie will be constructed off of TIME_MAPPING.

PSEUDOCOLUMNS: Set[str] = {'_PARTITIONDATE', '_PARTITIONTIME', '_FILE_NAME', '_TABLE_SUFFIX'}

Columns that are auto-generated by the engine corresponding to this dialect. For example, such columns may be excluded from SELECT * queries.

SET_OP_DISTINCT_BY_DEFAULT: Dict[Type[sqlglot.expressions.Expression], Optional[bool]] = {<class 'sqlglot.expressions.Except'>: None, <class 'sqlglot.expressions.Intersect'>: None, <class 'sqlglot.expressions.Union'>: None}

Whether a set operation uses DISTINCT by default. This is None when either DISTINCT or ALL must be explicitly specified.

COERCES_TO: Dict[sqlglot.expressions.DataType.Type, Set[sqlglot.expressions.DataType.Type]] = {<Type.TEXT: 'TEXT'>: set(), <Type.NVARCHAR: 'NVARCHAR'>: {<Type.TEXT: 'TEXT'>}, <Type.VARCHAR: 'VARCHAR'>: {<Type.TEXT: 'TEXT'>, <Type.NVARCHAR: 'NVARCHAR'>}, <Type.NCHAR: 'NCHAR'>: {<Type.TEXT: 'TEXT'>, <Type.VARCHAR: 'VARCHAR'>, <Type.NVARCHAR: 'NVARCHAR'>}, <Type.CHAR: 'CHAR'>: {<Type.TEXT: 'TEXT'>, <Type.NCHAR: 'NCHAR'>, <Type.VARCHAR: 'VARCHAR'>, <Type.NVARCHAR: 'NVARCHAR'>}, <Type.DOUBLE: 'DOUBLE'>: set(), <Type.FLOAT: 'FLOAT'>: {<Type.DOUBLE: 'DOUBLE'>}, <Type.DECIMAL: 'DECIMAL'>: {<Type.BIGDECIMAL: 'BIGDECIMAL'>, <Type.DOUBLE: 'DOUBLE'>, <Type.FLOAT: 'FLOAT'>}, <Type.BIGINT: 'BIGINT'>: {<Type.DOUBLE: 'DOUBLE'>, <Type.FLOAT: 'FLOAT'>, <Type.DECIMAL: 'DECIMAL'>}, <Type.INT: 'INT'>: {<Type.DOUBLE: 'DOUBLE'>, <Type.FLOAT: 'FLOAT'>, <Type.BIGINT: 'BIGINT'>, <Type.DECIMAL: 'DECIMAL'>}, <Type.SMALLINT: 'SMALLINT'>: {<Type.DOUBLE: 'DOUBLE'>, <Type.FLOAT: 'FLOAT'>, <Type.INT: 'INT'>, <Type.BIGINT: 'BIGINT'>, <Type.DECIMAL: 'DECIMAL'>}, <Type.TINYINT: 'TINYINT'>: {<Type.DOUBLE: 'DOUBLE'>, <Type.FLOAT: 'FLOAT'>, <Type.SMALLINT: 'SMALLINT'>, <Type.INT: 'INT'>, <Type.BIGINT: 'BIGINT'>, <Type.DECIMAL: 'DECIMAL'>}, <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: set(), <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: {<Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>}, <Type.TIMESTAMP: 'TIMESTAMP'>: {<Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>}, <Type.DATETIME: 'DATETIME'>: {<Type.TIMESTAMP: 'TIMESTAMP'>, <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>}, <Type.DATE: 'DATE'>: {<Type.TIMESTAMP: 'TIMESTAMP'>, <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <Type.DATETIME: 'DATETIME'>, <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>}, <Type.NULL: 'NULL'>: {<Type.FLOAT: 'FLOAT'>, <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <Type.NCHAR: 'NCHAR'>, <Type.TIMESTAMP: 'TIMESTAMP'>, <Type.VARCHAR: 'VARCHAR'>, <Type.BIGINT: 'BIGINT'>, <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <Type.DECIMAL: 'DECIMAL'>, <Type.DATE: 
'DATE'>, <Type.TEXT: 'TEXT'>, <Type.TINYINT: 'TINYINT'>, <Type.DOUBLE: 'DOUBLE'>, <Type.NVARCHAR: 'NVARCHAR'>, <Type.DATETIME: 'DATETIME'>, <Type.SMALLINT: 'SMALLINT'>, <Type.INT: 'INT'>, <Type.CHAR: 'CHAR'>}, <Type.BIGDECIMAL: 'BIGDECIMAL'>: {<Type.DOUBLE: 'DOUBLE'>}}
TYPE_TO_EXPRESSIONS: Dict[sqlglot.expressions.DataType.Type, Set[Type[sqlglot.expressions.Expression]]] = {<Type.BIGINT: 'BIGINT'>: {<class 'sqlglot.expressions.Int64'>, <class 'sqlglot.expressions.UnixMicros'>, <class 'sqlglot.expressions.UnixSeconds'>, <class 'sqlglot.expressions.UnixDate'>, <class 'sqlglot.expressions.ArraySize'>, <class 'sqlglot.expressions.CountIf'>, <class 'sqlglot.expressions.ApproxDistinct'>, <class 'sqlglot.expressions.Length'>, <class 'sqlglot.expressions.UnixMillis'>}, <Type.BINARY: 'BINARY'>: {<class 'sqlglot.expressions.FromBase32'>, <class 'sqlglot.expressions.FromBase64'>}, <Type.BOOLEAN: 'BOOLEAN'>: {<class 'sqlglot.expressions.StartsWith'>, <class 'sqlglot.expressions.Boolean'>, <class 'sqlglot.expressions.Between'>, <class 'sqlglot.expressions.RegexpLike'>, <class 'sqlglot.expressions.LogicalAnd'>, <class 'sqlglot.expressions.LogicalOr'>, <class 'sqlglot.expressions.In'>, <class 'sqlglot.expressions.EndsWith'>}, <Type.DATE: 'DATE'>: {<class 'sqlglot.expressions.TimeStrToDate'>, <class 'sqlglot.expressions.DateFromParts'>, <class 'sqlglot.expressions.LastDay'>, <class 'sqlglot.expressions.TsOrDsToDate'>, <class 'sqlglot.expressions.CurrentDate'>, <class 'sqlglot.expressions.StrToDate'>, <class 'sqlglot.expressions.Date'>, <class 'sqlglot.expressions.DiToDate'>, <class 'sqlglot.expressions.DateStrToDate'>}, <Type.DATETIME: 'DATETIME'>: {<class 'sqlglot.expressions.CurrentDatetime'>, <class 'sqlglot.expressions.DatetimeSub'>, <class 'sqlglot.expressions.DatetimeAdd'>, <class 'sqlglot.expressions.Datetime'>}, <Type.DOUBLE: 'DOUBLE'>: {<class 'sqlglot.expressions.Stddev'>, <class 'sqlglot.expressions.Pow'>, <class 'sqlglot.expressions.VariancePop'>, <class 'sqlglot.expressions.Sqrt'>, <class 'sqlglot.expressions.Variance'>, <class 'sqlglot.expressions.ToDouble'>, <class 'sqlglot.expressions.ApproxQuantile'>, <class 'sqlglot.expressions.SafeDivide'>, <class 'sqlglot.expressions.Ln'>, <class 'sqlglot.expressions.Quantile'>, <class 
'sqlglot.expressions.Exp'>, <class 'sqlglot.expressions.StddevSamp'>, <class 'sqlglot.expressions.Round'>, <class 'sqlglot.expressions.StddevPop'>, <class 'sqlglot.expressions.Avg'>, <class 'sqlglot.expressions.Log'>}, <Type.INT: 'INT'>: {<class 'sqlglot.expressions.Sign'>, <class 'sqlglot.expressions.DateDiff'>, <class 'sqlglot.expressions.Ascii'>, <class 'sqlglot.expressions.TimeDiff'>, <class 'sqlglot.expressions.DatetimeDiff'>, <class 'sqlglot.expressions.Unicode'>, <class 'sqlglot.expressions.Ceil'>, <class 'sqlglot.expressions.StrPosition'>, <class 'sqlglot.expressions.DateToDi'>, <class 'sqlglot.expressions.Levenshtein'>, <class 'sqlglot.expressions.TsOrDiToDi'>, <class 'sqlglot.expressions.TimestampDiff'>}, <Type.INTERVAL: 'INTERVAL'>: {<class 'sqlglot.expressions.JustifyInterval'>, <class 'sqlglot.expressions.MakeInterval'>, <class 'sqlglot.expressions.JustifyHours'>, <class 'sqlglot.expressions.Interval'>, <class 'sqlglot.expressions.JustifyDays'>}, <Type.JSON: 'JSON'>: {<class 'sqlglot.expressions.ParseJSON'>}, <Type.TIME: 'TIME'>: {<class 'sqlglot.expressions.TimeSub'>, <class 'sqlglot.expressions.CurrentTime'>, <class 'sqlglot.expressions.Time'>, <class 'sqlglot.expressions.TimeAdd'>}, <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: {<class 'sqlglot.expressions.CurrentTimestamp'>, <class 'sqlglot.expressions.TimeStrToTime'>, <class 'sqlglot.expressions.TimestampSub'>, <class 'sqlglot.expressions.TimestampAdd'>, <class 'sqlglot.expressions.UnixToTime'>, <class 'sqlglot.expressions.StrToTime'>}, <Type.TINYINT: 'TINYINT'>: {<class 'sqlglot.expressions.Month'>, <class 'sqlglot.expressions.Day'>, <class 'sqlglot.expressions.Week'>, <class 'sqlglot.expressions.Quarter'>, <class 'sqlglot.expressions.Year'>}, <Type.VARCHAR: 'VARCHAR'>: {<class 'sqlglot.expressions.GroupConcat'>, <class 'sqlglot.expressions.UnixToStr'>, <class 'sqlglot.expressions.TimeToTimeStr'>, <class 'sqlglot.expressions.ToBase64'>, <class 'sqlglot.expressions.Concat'>, <class 
'sqlglot.expressions.Upper'>, <class 'sqlglot.expressions.Lower'>, <class 'sqlglot.expressions.Trim'>, <class 'sqlglot.expressions.DateToDateStr'>, <class 'sqlglot.expressions.UnixToTimeStr'>, <class 'sqlglot.expressions.DPipe'>, <class 'sqlglot.expressions.TimeToStr'>, <class 'sqlglot.expressions.ArrayToString'>, <class 'sqlglot.expressions.String'>, <class 'sqlglot.expressions.ConcatWs'>, <class 'sqlglot.expressions.Initcap'>, <class 'sqlglot.expressions.ToBase32'>, <class 'sqlglot.expressions.Chr'>, <class 'sqlglot.expressions.Substring'>, <class 'sqlglot.expressions.TsOrDsToDateStr'>, <class 'sqlglot.expressions.ArrayConcat'>}}
ANNOTATORS: Dict[Type[~E], Callable[[sqlglot.optimizer.annotate_types.TypeAnnotator, ~E], ~E]] = {<class 'sqlglot.expressions.Alias'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseNot'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Neg'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Not'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Paren'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.PivotAlias'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Unary'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Add'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.And'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Binary'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseAnd'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseLeftShift'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseOr'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseRightShift'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Collate'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Connector'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Corr'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.CovarPop'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.CovarSamp'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.DPipe'>: <function 
annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Distance'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Div'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Dot'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.EQ'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Escape'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.GT'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.GTE'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Glob'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.IntDiv'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Is'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONArrayContains'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONBContains'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONBExtract'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONBExtractScalar'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Kwarg'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.LT'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.LTE'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Like'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Mod'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Mul'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.NEQ'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.NullSafeEQ'>: <function Dialect.<dictcomp>.<lambda>>, <class 
'sqlglot.expressions.NullSafeNEQ'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Or'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Overlaps'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Pow'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.PropertyEQ'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.RegexpILike'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.RegexpLike'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.SimilarTo'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Slice'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Sub'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixMicros'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixSeconds'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CountIf'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Length'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixMillis'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.FromBase32'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.FromBase64'>: <function 
annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Boolean'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Between'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.In'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.EndsWith'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeStrToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.LastDay'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Date'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DiToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentDatetime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DatetimeSub'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DatetimeAdd'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Datetime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, 
<class 'sqlglot.expressions.Stddev'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.VariancePop'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Sqrt'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Variance'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ApproxQuantile'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.SafeDivide'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Ln'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Quantile'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Exp'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.StddevSamp'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Round'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.StddevPop'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Avg'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Log'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Sign'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.DateDiff'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Ascii'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.TimeDiff'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DatetimeDiff'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Unicode'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Ceil'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function 
annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DateToDi'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Levenshtein'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDiToDi'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampDiff'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.JustifyInterval'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.MakeInterval'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.JustifyHours'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Interval'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.JustifyDays'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ParseJSON'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeSub'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentTime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Time'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentTimestampLTZ'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentTimestamp'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeStrToTime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampSub'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampAdd'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function 
annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Month'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Day'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Week'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Quarter'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Year'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeToTimeStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ToBase64'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Concat'>: <function _annotate_concat>, <class 'sqlglot.expressions.Upper'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Lower'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.DateToDateStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTimeStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeToStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayToString'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.String'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ConcatWs'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Initcap'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 
'sqlglot.expressions.ToBase32'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Chr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Substring'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDateStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayConcat'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Abs'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Anonymous'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Array'>: <function _annotate_array>, <class 'sqlglot.expressions.AnyValue'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArrayAgg'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArrayConcatAgg'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArrayFirst'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArrayLast'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArrayReverse'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArraySlice'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Bracket'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Case'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Coalesce'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Count'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.DataType'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.DateAdd'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.DateSub'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Distinct'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Filter'>: <function 
Dialect.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.GenerateDateArray'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.GenerateTimestampArray'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Greatest'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.If'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Least'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Literal'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.LastValue'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Max'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Min'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Null'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Nullif'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Struct'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Sum'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.SortArray'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.TryCast'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Unnest'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Window'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Floor'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Left'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Right'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Pad'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function 
BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Repeat'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ApproxTopSum'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ApproxTopK'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ApproxQuantiles'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.BitwiseAndAgg'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.BitwiseOrAgg'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.BitwiseXorAgg'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.BitwiseCountAgg'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ByteLength'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ByteString'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.CodePointsToBytes'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.CodePointsToString'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Contains'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.CumeDist'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.DateFromUnixDate'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.DenseRank'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.FarmFingerprint'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.FirstValue'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Unhex'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Float64'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Format'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Grouping'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.IgnoreNulls'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONArray'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONArrayAppend'>: <function 
BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONArrayInsert'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONBool'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONExtractArray'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONKeysAtDepth'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONRemove'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONSet'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONStripNulls'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONType'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Lag'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Lead'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.LowerHex'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.LaxBool'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.LaxFloat64'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.LaxInt64'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.LaxString'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Normalize'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.NthValue'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Ntile'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ParseTime'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ParseDatetime'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ParseBignumeric'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ParseNumeric'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function BigQuery.<lambda>>, <class 
'sqlglot.expressions.PercentileDisc'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.PercentRank'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Rank'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.RegexpExtractAll'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.RegexpInstr'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Replace'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.RespectNulls'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Reverse'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.RowNumber'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.SafeConvertBytesToString'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Soundex'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.SHA'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.SHA2'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Split'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.TimestampFromParts'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.TimeFromParts'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.TimeTrunc'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.ToCodePoints'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.TsOrDsToTime'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Translate'>: <function BigQuery.<lambda>>}
def normalize_identifier(self, expression: ~E) -> ~E:
639    def normalize_identifier(self, expression: E) -> E:
640        if (
641            isinstance(expression, exp.Identifier)
642            and self.normalization_strategy is NormalizationStrategy.CASE_INSENSITIVE
643        ):
644            parent = expression.parent
645            while isinstance(parent, exp.Dot):
646                parent = parent.parent
647
648            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
649            # by default. The following check uses a heuristic to detect tables based on whether
650            # they are qualified. This should generally be correct, because tables in BigQuery
651            # must be qualified with at least a dataset, unless @@dataset_id is set.
652            case_sensitive = (
653                isinstance(parent, exp.UserDefinedFunction)
654                or (
655                    isinstance(parent, exp.Table)
656                    and parent.db
657                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
658                )
659                or expression.meta.get("is_table")
660            )
661            if not case_sensitive:
662                expression.set("this", expression.this.lower())
663
664            return t.cast(E, expression)
665
666        return super().normalize_identifier(expression)

Transforms an identifier in a way that resembles how it'd be resolved by this dialect.

For example, an identifier like FoO would be resolved as foo in Postgres, because it lowercases all unquoted identifiers. On the other hand, Snowflake uppercases them, so it would resolve it as FOO. If it was quoted, it'd need to be treated as case-sensitive, and so any normalization would be prohibited in order to avoid "breaking" the identifier.

There are also dialects like Spark, which are case-insensitive even when quotes are present, and dialects like MySQL, whose resolution rules match those employed by the underlying operating system, for example they may always be case-sensitive in Linux.

Finally, the normalization behavior of some engines can even be controlled through flags, like in Redshift's case, where users can explicitly set enable_case_sensitive_identifier.

SQLGlot aims to understand and handle all of these different behaviors gracefully, so that it can analyze queries in the optimizer and successfully capture their semantics.

SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two-character sequence \n) to its unescaped version (the corresponding literal character, e.g. an actual newline).

tokenizer_class = <class 'BigQuery.Tokenizer'>
jsonpath_tokenizer_class = <class 'BigQuery.JSONPathTokenizer'>
parser_class = <class 'BigQuery.Parser'>
generator_class = <class 'BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}, 'E': {'6': {'S': {0: True}}}, 'e': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D', '%S.%f': '%E6S', '%-d': '%e'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}, 'S': {'.': {'%': {'f': {0: True}}}}, '-': {'d': {0: True}}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {'%d': 'DD', '%m': 'MM', '%b': 'MON', '%B': 'MONTH', '%Y': 'YYYY', '%y': 'YY', '%I': 'HH12', '%H': 'HH24', '%M': 'MI', '%S': 'SS', '%f': 'SSSSS', '%z': 'TZH'}
INVERSE_FORMAT_TRIE: Dict = {'%': {'d': {0: True}, 'm': {0: True}, 'b': {0: True}, 'B': {0: True}, 'Y': {0: True}, 'y': {0: True}, 'I': {0: True}, 'H': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}, 'z': {0: True}}}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class BigQuery.JSONPathTokenizer(sqlglot.jsonpath.JSONPathTokenizer):
668    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
669        VAR_TOKENS = {
670            TokenType.DASH,
671            TokenType.VAR,
672        }
VAR_TOKENS = {<TokenType.VAR: 'VAR'>, <TokenType.DASH: 'DASH'>}
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer):
674    class Tokenizer(tokens.Tokenizer):
675        QUOTES = ["'", '"', '"""', "'''"]
676        COMMENTS = ["--", "#", ("/*", "*/")]
677        IDENTIFIERS = ["`"]
678        STRING_ESCAPES = ["\\"]
679
680        HEX_STRINGS = [("0x", ""), ("0X", "")]
681
682        BYTE_STRINGS = [
683            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
684        ]
685
686        RAW_STRINGS = [
687            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
688        ]
689
690        NESTED_COMMENTS = False
691
692        KEYWORDS = {
693            **tokens.Tokenizer.KEYWORDS,
694            "ANY TYPE": TokenType.VARIANT,
695            "BEGIN": TokenType.COMMAND,
696            "BEGIN TRANSACTION": TokenType.BEGIN,
697            "BYTEINT": TokenType.INT,
698            "BYTES": TokenType.BINARY,
699            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
700            "DATETIME": TokenType.TIMESTAMP,
701            "DECLARE": TokenType.DECLARE,
702            "ELSEIF": TokenType.COMMAND,
703            "EXCEPTION": TokenType.COMMAND,
704            "EXPORT": TokenType.EXPORT,
705            "FLOAT64": TokenType.DOUBLE,
706            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
707            "LOOP": TokenType.COMMAND,
708            "MODEL": TokenType.MODEL,
709            "NOT DETERMINISTIC": TokenType.VOLATILE,
710            "RECORD": TokenType.STRUCT,
711            "REPEAT": TokenType.COMMAND,
712            "TIMESTAMP": TokenType.TIMESTAMPTZ,
713            "WHILE": TokenType.COMMAND,
714        }
715        KEYWORDS.pop("DIV")
716        KEYWORDS.pop("VALUES")
717        KEYWORDS.pop("/*+")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '|>': <TokenType.PIPE_GT: 'PIPE_GT'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 
'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': 
<TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NAMESPACE': <TokenType.NAMESPACE: 'NAMESPACE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': 
<TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': 
<TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT256': <TokenType.INT256: 'INT256'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'UINT128': <TokenType.UINT128: 'UINT128'>, 'UINT256': <TokenType.UINT256: 'UINT256'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': 
<TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 
'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.ANALYZE: 'ANALYZE'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'REVOKE': <TokenType.REVOKE: 'REVOKE'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'DECLARE': <TokenType.DECLARE: 'DECLARE'>, 'ELSEIF': <TokenType.COMMAND: 'COMMAND'>, 'EXCEPTION': <TokenType.COMMAND: 'COMMAND'>, 'EXPORT': <TokenType.EXPORT: 'EXPORT'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'LOOP': <TokenType.COMMAND: 'COMMAND'>, 'MODEL': <TokenType.MODEL: 'MODEL'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'REPEAT': <TokenType.COMMAND: 'COMMAND'>, 'WHILE': <TokenType.COMMAND: 'COMMAND'>}
class BigQuery.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        """BigQuery-specific parser configuration and overrides."""

        PREFIXED_PIVOT_COLUMNS = True
        LOG_DEFAULTS_TO_LN = True
        SUPPORTS_IMPLICIT_UNNEST = True
        JOINS_HAVE_EQUAL_PRECEDENCE = True

        # BigQuery does not allow ASC/DESC to be used as an identifier
        ID_VAR_TOKENS = parser.Parser.ID_VAR_TOKENS - {TokenType.ASC, TokenType.DESC}
        ALIAS_TOKENS = parser.Parser.ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        COMMENT_TABLE_ALIAS_TOKENS = parser.Parser.COMMENT_TABLE_ALIAS_TOKENS - {
            TokenType.ASC,
            TokenType.DESC,
        }
        UPDATE_ALIAS_TOKENS = parser.Parser.UPDATE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}

        # Maps BigQuery function names to builders that produce canonical sqlglot
        # expressions. Note several BigQuery functions take their args in reversed
        # order relative to sqlglot's canonical form (e.g PARSE_DATE(fmt, value)),
        # hence the [seq_get(args, 1), seq_get(args, 0)] swaps below.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_TOP_COUNT": exp.ApproxTopK.from_arg_list,
            "BOOL": exp.JSONBool.from_arg_list,
            "CONTAINS_SUBSTR": _build_contains_substring,
            "DATE": _build_date,
            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=seq_get(args, 1),
                this=seq_get(args, 0),
                zone=seq_get(args, 2),
            ),
            "DATETIME": _build_datetime,
            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "EDIT_DISTANCE": _build_levenshtein,
            "FORMAT_DATE": _build_format_time(exp.TsOrDsToDate),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "JSON_EXTRACT_SCALAR": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_EXTRACT_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_EXTRACT_STRING_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            "JSON_KEYS": exp.JSONKeysAtDepth.from_arg_list,
            "JSON_QUERY": parser.build_extract_json_with_path(exp.JSONExtract),
            "JSON_QUERY_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_STRIP_NULLS": _build_json_strip_nulls,
            "JSON_VALUE": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_VALUE_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            # BigQuery LENGTH counts bytes for BYTES input, hence binary=True
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "MD5": exp.MD5Digest.from_arg_list,
            "NORMALIZE_AND_CASEFOLD": lambda args: exp.Normalize(
                this=seq_get(args, 0), form=seq_get(args, 1), is_casefold=True
            ),
            "OCTET_LENGTH": exp.ByteLength.from_arg_list,
            "TO_HEX": _build_to_hex,
            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIME": lambda args: build_formatted_time(exp.ParseTime, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _build_parse_timestamp,
            "PARSE_DATETIME": lambda args: build_formatted_time(exp.ParseDatetime, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(
                exp.RegexpExtractAll, default_group=exp.Literal.number(0)
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "STRPOS": exp.StrPosition.from_arg_list,
            "TIME": _build_time,
            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP": _build_timestamp,
            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
            ),
            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
            "FORMAT_DATETIME": _build_format_time(exp.TsOrDsToDatetime),
            "FORMAT_TIMESTAMP": _build_format_time(exp.TsOrDsToTimestamp),
            "FORMAT_TIME": _build_format_time(exp.TsOrDsToTime),
            "FROM_HEX": exp.Unhex.from_arg_list,
            "WEEK": lambda args: exp.WeekStart(this=exp.var(seq_get(args, 0))),
        }

        # Functions whose argument lists need bespoke parsing (not a plain CSV of
        # expressions), e.g ML functions taking MODEL/TABLE keywords.
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
            "JSON_ARRAY": lambda self: self.expression(
                exp.JSONArray, expressions=self._parse_csv(self._parse_bitwise)
            ),
            "MAKE_INTERVAL": lambda self: self._parse_make_interval(),
            "PREDICT": lambda self: self._parse_predict(),
            "FEATURES_AT_TIME": lambda self: self._parse_features_at_time(),
            "GENERATE_EMBEDDING": lambda self: self._parse_generate_embedding(),
            "VECTOR_SEARCH": lambda self: self._parse_vector_search(),
        }
        # BigQuery's TRIM does not use the `TRIM(<chars> FROM <expr>)` syntax
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        # Tokens that may legally follow a dashed table-name part (see
        # _parse_table_part), terminating the dash-stitching loop.
        DASHED_TABLE_PART_FOLLOW_TOKENS = {TokenType.DOT, TokenType.L_PAREN, TokenType.R_PAREN}

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
            TokenType.END: lambda self: self._parse_as_command(self._prev),
            TokenType.FOR: lambda self: self._parse_for_in(),
            TokenType.EXPORT: lambda self: self._parse_export_data(),
            TokenType.DECLARE: lambda self: self._parse_declare(),
        }

        # Subscript keyword -> (index base, safe/NULL-on-out-of-bounds) used by
        # _parse_bracket to normalize arr[OFFSET(i)] / arr[SAFE_ORDINAL(i)] etc.
        BRACKET_OFFSETS = {
            "OFFSET": (0, False),
            "ORDINAL": (1, False),
            "SAFE_OFFSET": (0, True),
            "SAFE_ORDINAL": (1, True),
        }
 875
 876        def _parse_for_in(self) -> t.Union[exp.ForIn, exp.Command]:
 877            index = self._index
 878            this = self._parse_range()
 879            self._match_text_seq("DO")
 880            if self._match(TokenType.COMMAND):
 881                self._retreat(index)
 882                return self._parse_as_command(self._prev)
 883            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())
 884
        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse a single table-name part, supporting BigQuery dashed names.

            BigQuery allows unquoted dashes in project/table names (e.g `my-project`),
            which the tokenizer splits into several tokens; this stitches them back
            into one identifier.
            """
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Consume `-<tokens>` runs until a token that may follow a dashed
                # part (dot/paren), appending the raw SQL text to the name.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    start = self._curr
                    while self._is_connected() and not self._match_set(
                        self.DASHED_TABLE_PART_FOLLOW_TOKENS, advance=False
                    ):
                        self._advance()

                    # No progress was made; bail out to avoid looping forever
                    if start == self._curr:
                        break

                    table_name += self._find_sql(start, self._prev)

                this = exp.Identifier(
                    this=table_name, quoted=this.args.get("quoted")
                ).update_positions(this)
            elif isinstance(this, exp.Literal):
                # A part starting with digits tokenizes as a number literal; glue
                # any immediately-adjacent var token onto it and treat the result
                # as a quoted identifier.
                table_name = this.name

                if self._is_connected() and self._parse_var(any_token=True):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True).update_positions(this)

            return this
 915
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a (possibly dotted/quoted) table reference into an `exp.Table`.

            Handles three BigQuery quirks on top of the base implementation:
            1. `proj-1.db.tbl` style names where `1.` tokenizes as a float.
            2. Fully/partially quoted references like `` `proj.db.tbl` `` that arrive
               as a single identifier containing dots.
            3. `INFORMATION_SCHEMA.<view>` references, which are merged into one
               quoted identifier so the schema module sees at most 3 parts.
            """
            # Always parse with wildcard=True; BigQuery supports `tbl*` wildcards
            table = super()._parse_table_parts(
                schema=schema, is_db_reference=is_db_reference, wildcard=True
            )

            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
            if not table.catalog:
                if table.db:
                    previous_db = table.args["db"]
                    parts = table.db.split(".")
                    if len(parts) == 2 and not table.args["db"].quoted:
                        table.set(
                            "catalog", exp.Identifier(this=parts[0]).update_positions(previous_db)
                        )
                        table.set("db", exp.Identifier(this=parts[1]).update_positions(previous_db))
                else:
                    previous_this = table.this
                    parts = table.name.split(".")
                    if len(parts) == 2 and not table.this.quoted:
                        table.set(
                            "db", exp.Identifier(this=parts[0]).update_positions(previous_this)
                        )
                        table.set(
                            "this", exp.Identifier(this=parts[1]).update_positions(previous_this)
                        )

            # A quoted part containing dots means the whole reference was quoted;
            # redistribute the dotted pieces across catalog/db/this.
            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
                alias = table.this
                catalog, db, this, *rest = (
                    exp.to_identifier(p, quoted=True)
                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
                )

                for part in (catalog, db, this):
                    if part:
                        part.update_positions(table.this)

                if rest and this:
                    this = exp.Dot.build([this, *rest])  # type: ignore

                table = exp.Table(
                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
                )
                table.meta["quoted_table"] = True
            else:
                alias = None

            # The `INFORMATION_SCHEMA` views in BigQuery need to be qualified by a region or
            # dataset, so if the project identifier is omitted we need to fix the ast so that
            # the `INFORMATION_SCHEMA.X` bit is represented as a single (quoted) Identifier.
            # Otherwise, we wouldn't correctly qualify a `Table` node that references these
            # views, because it would seem like the "catalog" part is set, when it'd actually
            # be the region/dataset. Merging the two identifiers into a single one is done to
            # avoid producing a 4-part Table reference, which would cause issues in the schema
            # module, when there are 3-part table names mixed with information schema views.
            #
            # See: https://cloud.google.com/bigquery/docs/information-schema-intro#syntax
            table_parts = table.parts
            if len(table_parts) > 1 and table_parts[-2].name.upper() == "INFORMATION_SCHEMA":
                # We need to alias the table here to avoid breaking existing qualified columns.
                # This is expected to be safe, because if there's an actual alias coming up in
                # the token stream, it will overwrite this one. If there isn't one, we are only
                # exposing the name that can be used to reference the view explicitly (a no-op).
                exp.alias_(
                    table,
                    t.cast(exp.Identifier, alias or table_parts[-1]),
                    table=True,
                    copy=False,
                )

                info_schema_view = f"{table_parts[-2].name}.{table_parts[-1].name}"
                new_this = exp.Identifier(this=info_schema_view, quoted=True).update_positions(
                    line=table_parts[-2].meta.get("line"),
                    col=table_parts[-1].meta.get("col"),
                    start=table_parts[-2].meta.get("start"),
                    end=table_parts[-1].meta.get("end"),
                )
                table.set("this", new_this)
                table.set("db", seq_get(table_parts, -3))
                table.set("catalog", seq_get(table_parts, -4))

            return table
1000
1001        def _parse_column(self) -> t.Optional[exp.Expression]:
1002            column = super()._parse_column()
1003            if isinstance(column, exp.Column):
1004                parts = column.parts
1005                if any("." in p.name for p in parts):
1006                    catalog, db, table, this, *rest = (
1007                        exp.to_identifier(p, quoted=True)
1008                        for p in split_num_words(".".join(p.name for p in parts), ".", 4)
1009                    )
1010
1011                    if rest and this:
1012                        this = exp.Dot.build([this, *rest])  # type: ignore
1013
1014                    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
1015                    column.meta["quoted_column"] = True
1016
1017            return column
1018
1019        @t.overload
1020        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...
1021
1022        @t.overload
1023        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...
1024
1025        def _parse_json_object(self, agg=False):
1026            json_object = super()._parse_json_object()
1027            array_kv_pair = seq_get(json_object.expressions, 0)
1028
1029            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
1030            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
1031            if (
1032                array_kv_pair
1033                and isinstance(array_kv_pair.this, exp.Array)
1034                and isinstance(array_kv_pair.expression, exp.Array)
1035            ):
1036                keys = array_kv_pair.this.expressions
1037                values = array_kv_pair.expression.expressions
1038
1039                json_object.set(
1040                    "expressions",
1041                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
1042                )
1043
1044            return json_object
1045
1046        def _parse_bracket(
1047            self, this: t.Optional[exp.Expression] = None
1048        ) -> t.Optional[exp.Expression]:
1049            bracket = super()._parse_bracket(this)
1050
1051            if this is bracket:
1052                return bracket
1053
1054            if isinstance(bracket, exp.Bracket):
1055                for expression in bracket.expressions:
1056                    name = expression.name.upper()
1057
1058                    if name not in self.BRACKET_OFFSETS:
1059                        break
1060
1061                    offset, safe = self.BRACKET_OFFSETS[name]
1062                    bracket.set("offset", offset)
1063                    bracket.set("safe", safe)
1064                    expression.replace(expression.expressions[0])
1065
1066            return bracket
1067
1068        def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
1069            unnest = super()._parse_unnest(with_alias=with_alias)
1070
1071            if not unnest:
1072                return None
1073
1074            unnest_expr = seq_get(unnest.expressions, 0)
1075            if unnest_expr:
1076                from sqlglot.optimizer.annotate_types import annotate_types
1077
1078                unnest_expr = annotate_types(unnest_expr, dialect=self.dialect)
1079
1080                # Unnesting a nested array (i.e array of structs) explodes the top-level struct fields,
1081                # in contrast to other dialects such as DuckDB which flattens only the array by default
1082                if unnest_expr.is_type(exp.DataType.Type.ARRAY) and any(
1083                    array_elem.is_type(exp.DataType.Type.STRUCT)
1084                    for array_elem in unnest_expr._type.expressions
1085                ):
1086                    unnest.set("explode_array", True)
1087
1088            return unnest
1089
1090        def _parse_make_interval(self) -> exp.MakeInterval:
1091            expr = exp.MakeInterval()
1092
1093            for arg_key in expr.arg_types:
1094                value = self._parse_lambda()
1095
1096                if not value:
1097                    break
1098
1099                # Non-named arguments are filled sequentially, (optionally) followed by named arguments
1100                # that can appear in any order e.g MAKE_INTERVAL(1, minute => 5, day => 2)
1101                if isinstance(value, exp.Kwarg):
1102                    arg_key = value.this.name
1103
1104                expr.set(arg_key, value)
1105
1106                self._match(TokenType.COMMA)
1107
1108            return expr
1109
1110        def _parse_predict(self) -> exp.Predict:
1111            self._match_text_seq("MODEL")
1112            this = self._parse_table()
1113
1114            self._match(TokenType.COMMA)
1115            self._match_text_seq("TABLE")
1116
1117            return self.expression(
1118                exp.Predict,
1119                this=this,
1120                expression=self._parse_table(),
1121                params_struct=self._match(TokenType.COMMA) and self._parse_bitwise(),
1122            )
1123
1124        def _parse_generate_embedding(self) -> exp.GenerateEmbedding:
1125            self._match_text_seq("MODEL")
1126            this = self._parse_table()
1127
1128            self._match(TokenType.COMMA)
1129            self._match_text_seq("TABLE")
1130
1131            return self.expression(
1132                exp.GenerateEmbedding,
1133                this=this,
1134                expression=self._parse_table(),
1135                params_struct=self._match(TokenType.COMMA) and self._parse_bitwise(),
1136            )
1137
1138        def _parse_features_at_time(self) -> exp.FeaturesAtTime:
1139            self._match(TokenType.TABLE)
1140            this = self._parse_table()
1141
1142            expr = self.expression(exp.FeaturesAtTime, this=this)
1143
1144            while self._match(TokenType.COMMA):
1145                arg = self._parse_lambda()
1146
1147                # Get the LHS of the Kwarg and set the arg to that value, e.g
1148                # "num_rows => 1" sets the expr's `num_rows` arg
1149                if arg:
1150                    expr.set(arg.this.name, arg)
1151
1152            return expr
1153
1154        def _parse_vector_search(self) -> exp.VectorSearch:
1155            self._match(TokenType.TABLE)
1156            base_table = self._parse_table()
1157
1158            self._match(TokenType.COMMA)
1159
1160            column_to_search = self._parse_bitwise()
1161            self._match(TokenType.COMMA)
1162
1163            self._match(TokenType.TABLE)
1164            query_table = self._parse_table()
1165
1166            expr = self.expression(
1167                exp.VectorSearch,
1168                this=base_table,
1169                column_to_search=column_to_search,
1170                query_table=query_table,
1171            )
1172
1173            while self._match(TokenType.COMMA):
1174                # query_column_to_search can be named argument or positional
1175                if self._match(TokenType.STRING, advance=False):
1176                    query_column = self._parse_string()
1177                    expr.set("query_column_to_search", query_column)
1178                else:
1179                    arg = self._parse_lambda()
1180                    if arg:
1181                        expr.set(arg.this.name, arg)
1182
1183            return expr
1184
1185        def _parse_export_data(self) -> exp.Export:
1186            self._match_text_seq("DATA")
1187
1188            return self.expression(
1189                exp.Export,
1190                connection=self._match_text_seq("WITH", "CONNECTION") and self._parse_table_parts(),
1191                options=self._parse_properties(),
1192                this=self._match_text_seq("AS") and self._parse_select(),
1193            )

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_DEFAULTS_TO_LN = True
SUPPORTS_IMPLICIT_UNNEST = True
JOINS_HAVE_EQUAL_PRECEDENCE = True
ID_VAR_TOKENS = {<TokenType.LIMIT: 'LIMIT'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.GET: 'GET'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.LEFT: 'LEFT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.CASE: 'CASE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.IS: 'IS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.JSONB: 'JSONB'>, <TokenType.VOID: 'VOID'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.CUBE: 'CUBE'>, <TokenType.STAGE: 'STAGE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.MERGE: 'MERGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.KILL: 'KILL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.RANGE: 'RANGE'>, 
<TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ASOF: 'ASOF'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.ANY: 'ANY'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.APPLY: 'APPLY'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.UINT: 'UINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.FINAL: 'FINAL'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TOP: 'TOP'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.FIRST: 'FIRST'>, <TokenType.JSON: 'JSON'>, <TokenType.SOME: 'SOME'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.NULL: 'NULL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.INDEX: 'INDEX'>, <TokenType.SINK: 'SINK'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE: 'DATE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.POINT: 'POINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NEXT: 'NEXT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MAP: 'MAP'>, 
<TokenType.INET: 'INET'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.LIST: 'LIST'>, <TokenType.INT128: 'INT128'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ALL: 'ALL'>, <TokenType.COPY: 'COPY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.RING: 'RING'>, <TokenType.INT256: 'INT256'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ROW: 'ROW'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DIV: 'DIV'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.XML: 'XML'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.SET: 'SET'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DETACH: 'DETACH'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TIMESTAMP_MS: 
'TIMESTAMP_MS'>, <TokenType.UINT128: 'UINT128'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.VAR: 'VAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.USE: 'USE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.TIME: 'TIME'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.BLOB: 'BLOB'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UUID: 'UUID'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.END: 'END'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.PUT: 'PUT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.FULL: 'FULL'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.INT: 'INT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.TAG: 'TAG'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.NAME: 'NAME'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DATETIME: 'DATETIME'>}
ALIAS_TOKENS = {<TokenType.LIMIT: 'LIMIT'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.GET: 'GET'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.LEFT: 'LEFT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.CASE: 'CASE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.IS: 'IS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.JSONB: 'JSONB'>, <TokenType.VOID: 'VOID'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.CUBE: 'CUBE'>, <TokenType.STAGE: 'STAGE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.MERGE: 'MERGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.KILL: 'KILL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.RANGE: 'RANGE'>, 
<TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ASOF: 'ASOF'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.ANY: 'ANY'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.APPLY: 'APPLY'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.UINT: 'UINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.FINAL: 'FINAL'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TOP: 'TOP'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.FIRST: 'FIRST'>, <TokenType.JSON: 'JSON'>, <TokenType.SOME: 'SOME'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.NULL: 'NULL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.INDEX: 'INDEX'>, <TokenType.SINK: 'SINK'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE: 'DATE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.POINT: 'POINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NEXT: 'NEXT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MAP: 'MAP'>, 
<TokenType.INET: 'INET'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.LIST: 'LIST'>, <TokenType.INT128: 'INT128'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ALL: 'ALL'>, <TokenType.COPY: 'COPY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.RING: 'RING'>, <TokenType.INT256: 'INT256'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ROW: 'ROW'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DIV: 'DIV'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.XML: 'XML'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.SET: 'SET'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DETACH: 'DETACH'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.VAR: 'VAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.USE: 'USE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.TIME: 'TIME'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.BLOB: 'BLOB'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UUID: 'UUID'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.END: 'END'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.PUT: 'PUT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.FULL: 'FULL'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.INT: 'INT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.TAG: 'TAG'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.NAME: 'NAME'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DATETIME: 'DATETIME'>}
TABLE_ALIAS_TOKENS = {<TokenType.LIMIT: 'LIMIT'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.GET: 'GET'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.CASE: 'CASE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.IS: 'IS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.JSONB: 'JSONB'>, <TokenType.VOID: 'VOID'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.CUBE: 'CUBE'>, <TokenType.STAGE: 'STAGE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.MERGE: 'MERGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.KILL: 'KILL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.RANGE: 'RANGE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.NUMMULTIRANGE: 
'NUMMULTIRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ANY: 'ANY'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.APPLY: 'APPLY'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.UINT: 'UINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.FINAL: 'FINAL'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TOP: 'TOP'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.FIRST: 'FIRST'>, <TokenType.JSON: 'JSON'>, <TokenType.SOME: 'SOME'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.NULL: 'NULL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.INDEX: 'INDEX'>, <TokenType.SINK: 'SINK'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE: 'DATE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.POINT: 'POINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NEXT: 'NEXT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MAP: 'MAP'>, <TokenType.INET: 'INET'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.USMALLINT: 'USMALLINT'>, 
<TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.LIST: 'LIST'>, <TokenType.INT128: 'INT128'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ALL: 'ALL'>, <TokenType.COPY: 'COPY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.RING: 'RING'>, <TokenType.INT256: 'INT256'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ROW: 'ROW'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DIV: 'DIV'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.XML: 'XML'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.SET: 'SET'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DETACH: 'DETACH'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT128: 'UINT128'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INT8RANGE: 'INT8RANGE'>, 
<TokenType.VARIANT: 'VARIANT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.VAR: 'VAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.USE: 'USE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.TIME: 'TIME'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.BLOB: 'BLOB'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UUID: 'UUID'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.END: 'END'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.PUT: 'PUT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.INT: 'INT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.TAG: 'TAG'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.NAME: 'NAME'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DATETIME: 'DATETIME'>}
COMMENT_TABLE_ALIAS_TOKENS = {<TokenType.LIMIT: 'LIMIT'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.GET: 'GET'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.CASE: 'CASE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.JSONB: 'JSONB'>, <TokenType.VOID: 'VOID'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.CUBE: 'CUBE'>, <TokenType.STAGE: 'STAGE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.MERGE: 'MERGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.KILL: 'KILL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.RANGE: 'RANGE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PRAGMA: 
'PRAGMA'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ANY: 'ANY'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.APPLY: 'APPLY'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.UINT: 'UINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.FINAL: 'FINAL'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.TOP: 'TOP'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.FIRST: 'FIRST'>, <TokenType.JSON: 'JSON'>, <TokenType.SOME: 'SOME'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.NULL: 'NULL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.INDEX: 'INDEX'>, <TokenType.SINK: 'SINK'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE: 'DATE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.POINT: 'POINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NEXT: 'NEXT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MAP: 'MAP'>, <TokenType.INET: 'INET'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATABASE: 'DATABASE'>, 
<TokenType.LIST: 'LIST'>, <TokenType.INT128: 'INT128'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ALL: 'ALL'>, <TokenType.COPY: 'COPY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.RING: 'RING'>, <TokenType.INT256: 'INT256'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ROW: 'ROW'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DIV: 'DIV'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.XML: 'XML'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.SET: 'SET'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DETACH: 'DETACH'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT128: 'UINT128'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.DECIMAL: 
'DECIMAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.VAR: 'VAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.USE: 'USE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.TIME: 'TIME'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.BLOB: 'BLOB'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UUID: 'UUID'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.END: 'END'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.PUT: 'PUT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.INT: 'INT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.TAG: 'TAG'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.NAME: 'NAME'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DATETIME: 'DATETIME'>}
UPDATE_ALIAS_TOKENS = {<TokenType.LIMIT: 'LIMIT'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.GET: 'GET'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.CASE: 'CASE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.IS: 'IS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.JSONB: 'JSONB'>, <TokenType.VOID: 'VOID'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.CUBE: 'CUBE'>, <TokenType.STAGE: 'STAGE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.MERGE: 'MERGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.KILL: 'KILL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.RANGE: 'RANGE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 
<TokenType.PRAGMA: 'PRAGMA'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ANY: 'ANY'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.APPLY: 'APPLY'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.UINT: 'UINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.FINAL: 'FINAL'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RENAME: 'RENAME'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.TOP: 'TOP'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.FIRST: 'FIRST'>, <TokenType.JSON: 'JSON'>, <TokenType.SOME: 'SOME'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.NULL: 'NULL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.INDEX: 'INDEX'>, <TokenType.SINK: 'SINK'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE: 'DATE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.POINT: 'POINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NEXT: 'NEXT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MAP: 'MAP'>, <TokenType.INET: 'INET'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 
<TokenType.DATABASE: 'DATABASE'>, <TokenType.LIST: 'LIST'>, <TokenType.INT128: 'INT128'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ALL: 'ALL'>, <TokenType.COPY: 'COPY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.RING: 'RING'>, <TokenType.INT256: 'INT256'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ROW: 'ROW'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DIV: 'DIV'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.XML: 'XML'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DETACH: 'DETACH'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT128: 'UINT128'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DATETIME2: 'DATETIME2'>, 
<TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.VAR: 'VAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.USE: 'USE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.TIME: 'TIME'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.BLOB: 'BLOB'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UUID: 'UUID'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.END: 'END'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.PUT: 'PUT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.INT: 'INT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.TAG: 'TAG'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.NAME: 'NAME'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DATETIME: 'DATETIME'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.And'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_QUANTILES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantiles'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'APPROX_TOP_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopSum'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONCAT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcatAgg'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFirst'>>, 'ARRAY_INTERSECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayIntersect'>>, 'ARRAY_INTERSECTION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayIntersect'>>, 'ARRAY_LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayLast'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_REMOVE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayRemove'>>, 'ARRAY_REVERSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayReverse'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SLICE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySlice'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 
'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'ASCII': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ascii'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'BIT_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.BitwiseAndAgg'>>, 'BIT_COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.BitwiseCountAgg'>>, 'BIT_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.BitwiseOrAgg'>>, 'BIT_XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.BitwiseXorAgg'>>, 'BYTE_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ByteLength'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'CODE_POINTS_TO_BYTES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CodePointsToBytes'>>, 'CODE_POINTS_TO_STRING': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.CodePointsToString'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Contains'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CONVERT_TO_CHARSET': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConvertToCharset'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CUME_DIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CumeDist'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_SCHEMA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentSchema'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_TIMESTAMP_L_T_Z': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestampLTZ'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_date>, 'DATE_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'DATE_BIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateBin'>>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_FROM_UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromUnixDate'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME': <function _build_datetime>, 'DATETIME_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DECODE_CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DecodeCase'>>, 'DENSE_RANK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DenseRank'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'ENDS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.EndsWith'>>, 'ENDSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.EndsWith'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FARM_FINGERPRINT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FarmFingerprint'>>, 'FARMFINGERPRINT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FarmFingerprint'>>, 'FEATURES_AT_TIME': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.FeaturesAtTime'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Flatten'>>, 'FLOAT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Float64'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Format'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE32': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase32'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_EMBEDDING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateEmbedding'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GET_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GetExtract'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'GROUPING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Grouping'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Int64'>>, 'IS_ASCII': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsAscii'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_APPEND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAppend'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSON_ARRAY_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayInsert'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_B_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBObjectAgg'>>, 'J_S_O_N_BOOL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBool'>>, 'J_S_O_N_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONCast'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_ARRAY': <function _build_extract_json_with_default_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function _build_extract_json_with_default_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_KEYS_AT_DEPTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONKeysAtDepth'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'JSON_REMOVE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONRemove'>>, 'JSON_SET': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONSet'>>, 'JSON_STRIP_NULLS': <function _build_json_strip_nulls>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'JSON_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONType'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'JUSTIFY_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JustifyDays'>>, 'JUSTIFY_HOURS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JustifyHours'>>, 'JUSTIFY_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JustifyInterval'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LAX_BOOL': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.LaxBool'>>, 'LAX_FLOAT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LaxFloat64'>>, 'LAX_INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LaxInt64'>>, 'LAX_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LaxString'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function BigQuery.Parser.<lambda>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'CHAR_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'CHARACTER_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAKE_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MakeInterval'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ntile'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Or'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 
'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_BIGNUMERIC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseBignumeric'>>, 'PARSE_DATETIME': <function BigQuery.Parser.<lambda>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PARSE_NUMERIC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseNumeric'>>, 'PARSE_TIME': <function BigQuery.Parser.<lambda>>, 'PERCENT_RANK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentRank'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'RANK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rank'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_INSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpInstr'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Replace'>>, 'REVERSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reverse'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONVERT_BYTES_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConvertBytesToString'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SOUNDEX': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Soundex'>>, 'SPACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Space'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'ST_DISTANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StDistance'>>, 'ST_POINT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StPoint'>>, 'ST_MAKEPOINT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StPoint'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRTOK_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTRING_INDEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SubstringIndex'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <function _build_time>, 'TIME_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <function _build_timestamp>, 'TIMESTAMP_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'TIMESTAMPDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMPFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMP_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE32': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase32'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_CODE_POINTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToCodePoints'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToNumber'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRANSLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Translate'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDatetime'>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'TYPEOF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Typeof'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNICODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unicode'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_MICROS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixMicros'>>, 'UNIX_MILLIS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixMillis'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VECTOR_SEARCH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VectorSearch'>>, 'WEEK': <function BigQuery.Parser.<lambda>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'XMLELEMENT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLElement'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'STRPOS': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.StrPosition'>>, 'CHARINDEX': <function Parser.<lambda>>, 'INSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'LOCATE': <function Parser.<lambda>>, 'TO_HEX': <function _build_to_hex>, 'APPROX_TOP_COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'BOOL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBool'>>, 'CONTAINS_SUBSTR': <function _build_contains_substring>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'EDIT_DISTANCE': <function _build_levenshtein>, 'FORMAT_DATE': <function _build_format_time.<locals>._builder>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'JSON_EXTRACT_STRING_ARRAY': <function _build_extract_json_with_default_path.<locals>._builder>, 'JSON_KEYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONKeysAtDepth'>>, 'JSON_QUERY': <function build_extract_json_with_path.<locals>._builder>, 'JSON_QUERY_ARRAY': <function _build_extract_json_with_default_path.<locals>._builder>, 'JSON_VALUE': <function _build_extract_json_with_default_path.<locals>._builder>, 'JSON_VALUE_ARRAY': <function _build_extract_json_with_default_path.<locals>._builder>, 'NORMALIZE_AND_CASEFOLD': <function BigQuery.Parser.<lambda>>, 'OCTET_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ByteLength'>>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _build_parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TIMESTAMP_MICROS': <function BigQuery.Parser.<lambda>>, 'TIMESTAMP_MILLIS': <function BigQuery.Parser.<lambda>>, 'TIMESTAMP_SECONDS': <function BigQuery.Parser.<lambda>>, 
'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'FORMAT_DATETIME': <function _build_format_time.<locals>._builder>, 'FORMAT_TIMESTAMP': <function _build_format_time.<locals>._builder>, 'FORMAT_TIME': <function _build_format_time.<locals>._builder>, 'FROM_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>}
FUNCTION_PARSERS = {'ARG_MAX': <function Parser.<dictcomp>.<lambda>>, 'ARGMAX': <function Parser.<dictcomp>.<lambda>>, 'MAX_BY': <function Parser.<dictcomp>.<lambda>>, 'ARG_MIN': <function Parser.<dictcomp>.<lambda>>, 'ARGMIN': <function Parser.<dictcomp>.<lambda>>, 'MIN_BY': <function Parser.<dictcomp>.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CEIL': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'FLOOR': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'XMLELEMENT': <function Parser.<lambda>>, 'XMLTABLE': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>, 'JSON_ARRAY': <function BigQuery.Parser.<lambda>>, 'MAKE_INTERVAL': <function BigQuery.Parser.<lambda>>, 'PREDICT': <function BigQuery.Parser.<lambda>>, 'FEATURES_AT_TIME': <function BigQuery.Parser.<lambda>>, 'GENERATE_EMBEDDING': <function BigQuery.Parser.<lambda>>, 'VECTOR_SEARCH': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.LIST: 'LIST'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.UNION: 'UNION'>, <TokenType.MAP: 'MAP'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.RANGE: 'RANGE'>, <TokenType.STRUCT: 'STRUCT'>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'ENVIRONMENT': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 
'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WATERMARK': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'BUCKET': <function Parser.<lambda>>, 'TRUNCATE': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>}
NULL_TOKENS = {<TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.NULL: 'NULL'>}
DASHED_TABLE_PART_FOLLOW_TOKENS = {<TokenType.R_PAREN: 'R_PAREN'>, <TokenType.DOT: 'DOT'>, <TokenType.L_PAREN: 'L_PAREN'>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.ANALYZE: 'ANALYZE'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.REVOKE: 'REVOKE'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UNPIVOT: 'UNPIVOT'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.ELSE: 'ELSE'>: <function BigQuery.Parser.<lambda>>, <TokenType.END: 'END'>: <function BigQuery.Parser.<lambda>>, <TokenType.FOR: 'FOR'>: <function BigQuery.Parser.<lambda>>, <TokenType.EXPORT: 'EXPORT'>: <function BigQuery.Parser.<lambda>>, <TokenType.DECLARE: 'DECLARE'>: <function 
BigQuery.Parser.<lambda>>}
BRACKET_OFFSETS = {'OFFSET': (0, False), 'ORDINAL': (1, False), 'SAFE_OFFSET': (0, True), 'SAFE_ORDINAL': (1, True)}
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
STRUCT_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
COLON_PLACEHOLDER_TOKENS
ARRAY_CONSTRUCTORS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
TIMESTAMPS
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
LAMBDAS
COLUMN_OPERATORS
CAST_COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
PIPE_SYNTAX_TRANSFORM_PARSERS
ALTER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
QUERY_MODIFIER_TOKENS
SET_PARSERS
SHOW_PARSERS
TYPE_LITERAL_PARSERS
TYPE_CONVERTERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
WINDOW_EXCLUDE_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
ANALYZE_STYLES
ANALYZE_EXPRESSION_PARSERS
PARTITION_KEYWORDS
AMBIGUOUS_ALIAS_TOKENS
OPERATION_MODIFIERS
RECURSIVE_CTE_SEARCH_KIND
MODIFIABLES
STRICT_CAST
IDENTIFY_PIVOT_STRINGS
TABLESAMPLE_CSV
DEFAULT_SAMPLING_METHOD
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
COLON_IS_VARIANT_EXTRACT
VALUES_FOLLOWED_BY_PAREN
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
OPTIONAL_ALIAS_TOKEN_CTE
ALTER_RENAME_REQUIRES_COLUMN
ZONE_AWARE_TIMESTAMP_CONSTRUCTOR
MAP_KEYS_ARE_ARBITRARY_EXPRESSIONS
JSON_EXTRACT_REQUIRES_JSON_EXPRESSION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
parse_set_operation
build_cast
errors
sql
class BigQuery.Generator(sqlglot.generator.Generator):
1195    class Generator(generator.Generator):
        # Dialect capability flags consumed by the base Generator when emitting
        # BigQuery SQL (hint support, LIMIT syntax, NULL ordering, "@" named
        # placeholders, OPTIONS(...) property prefix, etc.).
1196        INTERVAL_ALLOWS_PLURAL_FORM = False
1197        JOIN_HINTS = False
1198        QUERY_HINTS = False
1199        TABLE_HINTS = False
1200        LIMIT_FETCH = "LIMIT"
1201        RENAME_TABLE_WITH_DB = False
1202        NVL2_SUPPORTED = False
1203        UNNEST_WITH_ORDINALITY = False
1204        COLLATE_IS_FUNC = True
1205        LIMIT_ONLY_LITERALS = True
1206        SUPPORTS_TABLE_ALIAS_COLUMNS = False
1207        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
1208        JSON_KEY_VALUE_PAIR_SEP = ","
1209        NULL_ORDERING_SUPPORTED = False
1210        IGNORE_NULLS_IN_FUNC = True
1211        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
1212        CAN_IMPLEMENT_ARRAY_ANY = True
1213        SUPPORTS_TO_NUMBER = False
1214        NAMED_PLACEHOLDER_TOKEN = "@"
1215        HEX_FUNC = "TO_HEX"
1216        WITH_PROPERTIES_PREFIX = "OPTIONS"
1217        SUPPORTS_EXPLODING_PROJECTIONS = False
1218        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
1219        SUPPORTS_UNIX_SECONDS = True
1220
        # NOTE(review): presumably JSON path keys matching this pattern can be
        # emitted without quoting — confirm against the JSON path renderer.
1221        SAFE_JSON_PATH_KEY_RE = re.compile(r"^[_\-a-zA-Z][\-\w]*$")
1222
        # Cast node types checked by timetostr_sql to pick the FORMAT_* variant.
1223        TS_OR_DS_TYPES = (
1224            exp.TsOrDsToDatetime,
1225            exp.TsOrDsToTimestamp,
1226            exp.TsOrDsToTime,
1227            exp.TsOrDsToDate,
1228        )
1229
        # Expression-node -> renderer overrides. The base Generator's TRANSFORMS
        # is unpacked first, so every entry below takes precedence for BigQuery.
1230        TRANSFORMS = {
1231            **generator.Generator.TRANSFORMS,
1232            exp.ApproxTopK: rename_func("APPROX_TOP_COUNT"),
1233            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
1234            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
1235            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
1236            exp.Array: inline_array_unless_query,
1237            exp.ArrayContains: _array_contains_sql,
1238            exp.ArrayFilter: filter_array_using_unnest,
1239            exp.ArrayRemove: filter_array_using_unnest,
1240            exp.ByteLength: rename_func("BYTE_LENGTH"),
1241            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
1242            exp.CollateProperty: lambda self, e: (
1243                f"DEFAULT COLLATE {self.sql(e, 'this')}"
1244                if e.args.get("default")
1245                else f"COLLATE {self.sql(e, 'this')}"
1246            ),
1247            exp.Commit: lambda *_: "COMMIT TRANSACTION",
1248            exp.CountIf: rename_func("COUNTIF"),
1249            exp.Create: _create_sql,
1250            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
1251            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
1252            exp.DateDiff: lambda self, e: self.func(
1253                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
1254            ),
1255            exp.DateFromParts: rename_func("DATE"),
1256            exp.DateStrToDate: datestrtodate_sql,
1257            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
1258            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
1259            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
1260            exp.DateFromUnixDate: rename_func("DATE_FROM_UNIX_DATE"),
1261            exp.FromTimeZone: lambda self, e: self.func(
1262                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
1263            ),
1264            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
1265            exp.GroupConcat: lambda self, e: groupconcat_sql(
1266                self, e, func_name="STRING_AGG", within_group=False
1267            ),
        # HEX() is rendered as UPPER(TO_HEX(...)).
1268            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
1269            exp.HexString: lambda self, e: self.hexstring_sql(e, binary_function_repr="FROM_HEX"),
1270            exp.If: if_sql(false_value="NULL"),
1271            exp.ILike: no_ilike_sql,
1272            exp.IntDiv: rename_func("DIV"),
1273            exp.Int64: rename_func("INT64"),
1274            exp.JSONBool: rename_func("BOOL"),
1275            exp.JSONExtract: _json_extract_sql,
1276            exp.JSONExtractArray: _json_extract_sql,
1277            exp.JSONExtractScalar: _json_extract_sql,
1278            exp.JSONFormat: rename_func("TO_JSON_STRING"),
1279            exp.JSONKeysAtDepth: rename_func("JSON_KEYS"),
1280            exp.JSONValueArray: rename_func("JSON_VALUE_ARRAY"),
1281            exp.Levenshtein: _levenshtein_sql,
1282            exp.Max: max_or_greatest,
1283            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
1284            exp.MD5Digest: rename_func("MD5"),
1285            exp.Min: min_or_least,
1286            exp.Normalize: lambda self, e: self.func(
1287                "NORMALIZE_AND_CASEFOLD" if e.args.get("is_casefold") else "NORMALIZE",
1288                e.this,
1289                e.args.get("form"),
1290            ),
1291            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
1292            exp.RegexpExtract: lambda self, e: self.func(
1293                "REGEXP_EXTRACT",
1294                e.this,
1295                e.expression,
1296                e.args.get("position"),
1297                e.args.get("occurrence"),
1298            ),
1299            exp.RegexpExtractAll: lambda self, e: self.func(
1300                "REGEXP_EXTRACT_ALL", e.this, e.expression
1301            ),
1302            exp.RegexpReplace: regexp_replace_sql,
1303            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
1304            exp.ReturnsProperty: _returnsproperty_sql,
1305            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
1306            exp.ParseTime: lambda self, e: self.func("PARSE_TIME", self.format_time(e), e.this),
1307            exp.ParseDatetime: lambda self, e: self.func(
1308                "PARSE_DATETIME", self.format_time(e), e.this
1309            ),
        # SELECT statements run through sqlglot's generic rewrites before rendering.
1310            exp.Select: transforms.preprocess(
1311                [
1312                    transforms.explode_projection_to_unnest(),
1313                    transforms.unqualify_unnest,
1314                    transforms.eliminate_distinct_on,
1315                    _alias_ordered_group,
1316                    transforms.eliminate_semi_and_anti_joins,
1317                ]
1318            ),
1319            exp.SHA: rename_func("SHA1"),
1320            exp.SHA2: sha256_sql,
1321            exp.StabilityProperty: lambda self, e: (
1322                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
1323            ),
1324            exp.String: rename_func("STRING"),
1325            exp.StrPosition: lambda self, e: (
1326                strposition_sql(
1327                    self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
1328                )
1329            ),
1330            exp.StrToDate: _str_to_datetime_sql,
1331            exp.StrToTime: _str_to_datetime_sql,
1332            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
1333            exp.TimeFromParts: rename_func("TIME"),
1334            exp.TimestampFromParts: rename_func("DATETIME"),
1335            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
1336            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
1337            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
1338            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
1339            exp.TimeStrToTime: timestrtotime_sql,
1340            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
1341            exp.TsOrDsAdd: _ts_or_ds_add_sql,
1342            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
1343            exp.TsOrDsToTime: rename_func("TIME"),
1344            exp.TsOrDsToDatetime: rename_func("DATETIME"),
1345            exp.TsOrDsToTimestamp: rename_func("TIMESTAMP"),
1346            exp.Unhex: rename_func("FROM_HEX"),
1347            exp.UnixDate: rename_func("UNIX_DATE"),
1348            exp.UnixToTime: _unix_to_time_sql,
1349            exp.Uuid: lambda *_: "GENERATE_UUID()",
1350            exp.Values: _derived_table_values_to_unnest,
1351            exp.VariancePop: rename_func("VAR_POP"),
1352            exp.SafeDivide: rename_func("SAFE_DIVIDE"),
1353        }
1354
        # JSON path node kinds this dialect supports rendering.
1355        SUPPORTED_JSON_PATH_PARTS = {
1356            exp.JSONPathKey,
1357            exp.JSONPathRoot,
1358            exp.JSONPathSubscript,
1359        }
1360
        # Generic sqlglot data types -> canonical BigQuery type names
        # (INT64 / FLOAT64 / STRING / BYTES / NUMERIC / ...).
1361        TYPE_MAPPING = {
1362            **generator.Generator.TYPE_MAPPING,
1363            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
1364            exp.DataType.Type.BIGINT: "INT64",
1365            exp.DataType.Type.BINARY: "BYTES",
1366            exp.DataType.Type.BLOB: "BYTES",
1367            exp.DataType.Type.BOOLEAN: "BOOL",
1368            exp.DataType.Type.CHAR: "STRING",
1369            exp.DataType.Type.DECIMAL: "NUMERIC",
1370            exp.DataType.Type.DOUBLE: "FLOAT64",
1371            exp.DataType.Type.FLOAT: "FLOAT64",
1372            exp.DataType.Type.INT: "INT64",
1373            exp.DataType.Type.NCHAR: "STRING",
1374            exp.DataType.Type.NVARCHAR: "STRING",
1375            exp.DataType.Type.SMALLINT: "INT64",
1376            exp.DataType.Type.TEXT: "STRING",
1377            exp.DataType.Type.TIMESTAMP: "DATETIME",
1378            exp.DataType.Type.TIMESTAMPNTZ: "DATETIME",
1379            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
1380            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
1381            exp.DataType.Type.TINYINT: "INT64",
1382            exp.DataType.Type.ROWVERSION: "BYTES",
1383            exp.DataType.Type.UUID: "STRING",
1384            exp.DataType.Type.VARBINARY: "BYTES",
1385            exp.DataType.Type.VARCHAR: "STRING",
1386            exp.DataType.Type.VARIANT: "ANY TYPE",
1387        }
1388
        # DDL property placement; PARTITION BY is emitted after the schema.
1389        PROPERTIES_LOCATION = {
1390            **generator.Generator.PROPERTIES_LOCATION,
1391            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
1392            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
1393        }
1394
1395        # WINDOW comes after QUALIFY
1396        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
1397        AFTER_HAVING_MODIFIER_TRANSFORMS = {
1398            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
1399            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
1400        }
1401
1402        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
1403        RESERVED_KEYWORDS = {
1404            "all",
1405            "and",
1406            "any",
1407            "array",
1408            "as",
1409            "asc",
1410            "assert_rows_modified",
1411            "at",
1412            "between",
1413            "by",
1414            "case",
1415            "cast",
1416            "collate",
1417            "contains",
1418            "create",
1419            "cross",
1420            "cube",
1421            "current",
1422            "default",
1423            "define",
1424            "desc",
1425            "distinct",
1426            "else",
1427            "end",
1428            "enum",
1429            "escape",
1430            "except",
1431            "exclude",
1432            "exists",
1433            "extract",
1434            "false",
1435            "fetch",
1436            "following",
1437            "for",
1438            "from",
1439            "full",
1440            "group",
1441            "grouping",
1442            "groups",
1443            "hash",
1444            "having",
1445            "if",
1446            "ignore",
1447            "in",
1448            "inner",
1449            "intersect",
1450            "interval",
1451            "into",
1452            "is",
1453            "join",
1454            "lateral",
1455            "left",
1456            "like",
1457            "limit",
1458            "lookup",
1459            "merge",
1460            "natural",
1461            "new",
1462            "no",
1463            "not",
1464            "null",
1465            "nulls",
1466            "of",
1467            "on",
1468            "or",
1469            "order",
1470            "outer",
1471            "over",
1472            "partition",
1473            "preceding",
1474            "proto",
1475            "qualify",
1476            "range",
1477            "recursive",
1478            "respect",
1479            "right",
1480            "rollup",
1481            "rows",
1482            "select",
1483            "set",
1484            "some",
1485            "struct",
1486            "tablesample",
1487            "then",
1488            "to",
1489            "treat",
1490            "true",
1491            "unbounded",
1492            "union",
1493            "unnest",
1494            "using",
1495            "when",
1496            "where",
1497            "window",
1498            "with",
1499            "within",
1500        }
1501
1502        def datetrunc_sql(self, expression: exp.DateTrunc) -> str:
1503            unit = expression.unit
1504            unit_sql = unit.name if unit.is_string else self.sql(unit)
1505            return self.func("DATE_TRUNC", expression.this, unit_sql, expression.args.get("zone"))
1506
1507        def mod_sql(self, expression: exp.Mod) -> str:
1508            this = expression.this
1509            expr = expression.expression
1510            return self.func(
1511                "MOD",
1512                this.unnest() if isinstance(this, exp.Paren) else this,
1513                expr.unnest() if isinstance(expr, exp.Paren) else expr,
1514            )
1515
1516        def column_parts(self, expression: exp.Column) -> str:
1517            if expression.meta.get("quoted_column"):
1518                # If a column reference is of the form `dataset.table`.name, we need
1519                # to preserve the quoted table path, otherwise the reference breaks
1520                table_parts = ".".join(p.name for p in expression.parts[:-1])
1521                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
1522                return f"{table_path}.{self.sql(expression, 'this')}"
1523
1524            return super().column_parts(expression)
1525
1526        def table_parts(self, expression: exp.Table) -> str:
1527            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
1528            # we need to make sure the correct quoting is used in each case.
1529            #
1530            # For example, if there is a CTE x that clashes with a schema name, then the former will
1531            # return the table y in that schema, whereas the latter will return the CTE's y column:
1532            #
1533            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
1534            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
1535            if expression.meta.get("quoted_table"):
1536                table_parts = ".".join(p.name for p in expression.parts)
1537                return self.sql(exp.Identifier(this=table_parts, quoted=True))
1538
1539            return super().table_parts(expression)
1540
1541        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1542            this = expression.this
1543            if isinstance(this, exp.TsOrDsToDatetime):
1544                func_name = "FORMAT_DATETIME"
1545            elif isinstance(this, exp.TsOrDsToTimestamp):
1546                func_name = "FORMAT_TIMESTAMP"
1547            elif isinstance(this, exp.TsOrDsToTime):
1548                func_name = "FORMAT_TIME"
1549            else:
1550                func_name = "FORMAT_DATE"
1551
1552            time_expr = this if isinstance(this, self.TS_OR_DS_TYPES) else expression
1553            return self.func(
1554                func_name, self.format_time(expression), time_expr.this, expression.args.get("zone")
1555            )
1556
1557        def eq_sql(self, expression: exp.EQ) -> str:
1558            # Operands of = cannot be NULL in BigQuery
1559            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
1560                if not isinstance(expression.parent, exp.Update):
1561                    return "NULL"
1562
1563            return self.binary(expression, "=")
1564
1565        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
1566            parent = expression.parent
1567
1568            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
1569            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
1570            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
1571                return self.func(
1572                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
1573                )
1574
1575            return super().attimezone_sql(expression)
1576
1577        def trycast_sql(self, expression: exp.TryCast) -> str:
1578            return self.cast_sql(expression, safe_prefix="SAFE_")
1579
        def bracket_sql(self, expression: exp.Bracket) -> str:
            """Render subscript access, handling BigQuery struct/array peculiarities.

            Struct access with a string key becomes dot notation (BQ rejects
            ``struct['field']``); array subscripts are wrapped in OFFSET(..) or
            ORDINAL(..) depending on the index base, optionally SAFE_-prefixed.
            """
            this = expression.this
            expressions = expression.expressions

            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
                arg = expressions[0]
                # Lazily annotate types so string keys can be told apart from indices.
                if arg.type is None:
                    from sqlglot.optimizer.annotate_types import annotate_types

                    arg = annotate_types(arg, dialect=self.dialect)

                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
                    # BQ doesn't support bracket syntax with string values for structs
                    return f"{self.sql(this)}.{arg.name}"

            expressions_sql = self.expressions(expression, flat=True)
            # "offset" encodes the index base: 0 -> OFFSET(..), 1 -> ORDINAL(..)
            offset = expression.args.get("offset")

            if offset == 0:
                expressions_sql = f"OFFSET({expressions_sql})"
            elif offset == 1:
                expressions_sql = f"ORDINAL({expressions_sql})"
            elif offset is not None:
                self.unsupported(f"Unsupported array offset: {offset}")

            # SAFE_OFFSET / SAFE_ORDINAL tolerate out-of-range access (see BQ array docs).
            if expression.args.get("safe"):
                expressions_sql = f"SAFE_{expressions_sql}"

            return f"{self.sql(this)}[{expressions_sql}]"
1609
        def in_unnest_op(self, expression: exp.Unnest) -> str:
            # Render the UNNEST operand of IN as-is, with no extra wrapping.
            return self.sql(expression)
1612
        def version_sql(self, expression: exp.Version) -> str:
            # BigQuery time travel spells the clause FOR SYSTEM_TIME AS OF, so
            # rename a TIMESTAMP version kind before delegating to the base renderer.
            if expression.name == "TIMESTAMP":
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
1617
1618        def contains_sql(self, expression: exp.Contains) -> str:
1619            this = expression.this
1620            expr = expression.expression
1621
1622            if isinstance(this, exp.Lower) and isinstance(expr, exp.Lower):
1623                this = this.this
1624                expr = expr.this
1625
1626            return self.func("CONTAINS_SUBSTR", this, expr, expression.args.get("json_scope"))
1627
        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render CAST, special-casing inline typed ARRAY literals.

            ``ARRAY<INT64>[1, 2, 3]`` must roundtrip as-is rather than become a
            CAST call; ARRAY(SELECT ...) is excluded since that syntax only
            applies to literals.
            """
            this = expression.this

            # This ensures that inline type-annotated ARRAY literals like ARRAY<INT64>[1, 2, 3]
            # are roundtripped unaffected. The inner check excludes ARRAY(SELECT ...) expressions,
            # because they aren't literals and so the above syntax is invalid BigQuery.
            if isinstance(this, exp.Array):
                elem = seq_get(this.expressions, 0)
                if not (elem and elem.find(exp.Query)):
                    return f"{self.sql(expression, 'to')}{self.sql(this)}"

            return super().cast_sql(expression, safe_prefix=safe_prefix)
1640
1641        def declareitem_sql(self, expression: exp.DeclareItem) -> str:
1642            variables = self.expressions(expression, "this")
1643            default = self.sql(expression, "default")
1644            default = f" DEFAULT {default}" if default else ""
1645            kind = self.sql(expression, "kind")
1646            kind = f" {kind}" if kind else ""
1647
1648            return f"{variables}{kind}{default}"

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
NVL2_SUPPORTED = False
UNNEST_WITH_ORDINALITY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
SUPPORTS_TABLE_ALIAS_COLUMNS = False
UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
JSON_KEY_VALUE_PAIR_SEP = ','
NULL_ORDERING_SUPPORTED = False
IGNORE_NULLS_IN_FUNC = True
JSON_PATH_SINGLE_QUOTE_ESCAPE = True
CAN_IMPLEMENT_ARRAY_ANY = True
SUPPORTS_TO_NUMBER = False
NAMED_PLACEHOLDER_TOKEN = '@'
HEX_FUNC = 'TO_HEX'
WITH_PROPERTIES_PREFIX = 'OPTIONS'
SUPPORTS_EXPLODING_PROJECTIONS = False
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_UNIX_SECONDS = True
SAFE_JSON_PATH_KEY_RE = re.compile('^[_\\-a-zA-Z][\\-\\w]*$')
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeColumns'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeWith'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Ceil'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConvertToCharset'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CredentialsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EnviromentProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Floor'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Get'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByBucket'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionByTruncate'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PositionalColumn'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Put'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TableColumn'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Tags'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingData'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WeekStart'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.ForceProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxTopK'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.ArgMin'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.Array'>: <function inline_array_unless_query>, <class 'sqlglot.expressions.ArrayContains'>: <function _array_contains_sql>, <class 'sqlglot.expressions.ArrayFilter'>: <function filter_array_using_unnest>, <class 'sqlglot.expressions.ArrayRemove'>: <function filter_array_using_unnest>, <class 'sqlglot.expressions.ByteLength'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.CollateProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Commit'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.CountIf'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DateFromUnixDate'>: <function rename_func.<locals>.<lambda>>, <class 
'sqlglot.expressions.FromTimeZone'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.HexString'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONBool'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function _json_extract_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function _json_extract_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function _json_extract_sql>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONKeysAtDepth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Levenshtein'>: <function _levenshtein_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.Normalize'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtractAll'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 
'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Rollback'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.ParseTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.ParseDatetime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.SHA2'>: <function sha256_sql>, <class 'sqlglot.expressions.String'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function _str_to_datetime_sql>, <class 'sqlglot.expressions.StrToTime'>: <function _str_to_datetime_sql>, <class 'sqlglot.expressions.TimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampDiff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Transaction'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _ts_or_ds_add_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function _ts_or_ds_diff_sql>, <class 'sqlglot.expressions.TsOrDsToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDatetime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsToTimestamp'>: 
<function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixDate'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function _unix_to_time_sql>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.SafeDivide'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.DATETIME2: 'DATETIME2'>: 'TIMESTAMP', <Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.BLOB: 'BLOB'>: 'BYTES', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'BYTES', <Type.SMALLDATETIME: 'SMALLDATETIME'>: 'TIMESTAMP', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.UUID: 'UUID'>: 'STRING', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EncodeProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EnviromentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.IncludeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 
'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: 
<Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StorageHandlerProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Tags'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, 
<class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ForceProperty'>: <Location.POST_CREATE: 'POST_CREATE'>}
AFTER_HAVING_MODIFIER_TRANSFORMS = {'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
RESERVED_KEYWORDS = {'null', 'proto', 'contains', 'inner', 'from', 'define', 'grouping', 'assert_rows_modified', 'like', 'no', 'unbounded', 'in', 'lookup', 'else', 'respect', 'select', 'distinct', 'full', 'where', 'exists', 'false', 'within', 'case', 'between', 'if', 'not', 'order', 'collate', 'except', 'ignore', 'partition', 'with', 'array', 'cross', 'when', 'lateral', 'current', 'set', 'window', 'tablesample', 'intersect', 'new', 'fetch', 'using', 'rollup', 'by', 'on', 'natural', 'some', 'or', 'right', 'as', 'union', 'enum', 'left', 'create', 'treat', 'join', 'and', 'group', 'into', 'end', 'to', 'asc', 'extract', 'following', 'merge', 'cast', 'escape', 'cube', 'range', 'at', 'over', 'unnest', 'groups', 'all', 'desc', 'limit', 'interval', 'struct', 'outer', 'default', 'is', 'qualify', 'then', 'hash', 'recursive', 'rows', 'any', 'true', 'exclude', 'having', 'nulls', 'preceding', 'for', 'of'}
def datetrunc_sql(self, expression: sqlglot.expressions.DateTrunc) -> str:
1502        def datetrunc_sql(self, expression: exp.DateTrunc) -> str:
1503            unit = expression.unit
1504            unit_sql = unit.name if unit.is_string else self.sql(unit)
1505            return self.func("DATE_TRUNC", expression.this, unit_sql, expression.args.get("zone"))
def mod_sql(self, expression: sqlglot.expressions.Mod) -> str:
1507        def mod_sql(self, expression: exp.Mod) -> str:
1508            this = expression.this
1509            expr = expression.expression
1510            return self.func(
1511                "MOD",
1512                this.unnest() if isinstance(this, exp.Paren) else this,
1513                expr.unnest() if isinstance(expr, exp.Paren) else expr,
1514            )
def column_parts(self, expression: sqlglot.expressions.Column) -> str:
1516        def column_parts(self, expression: exp.Column) -> str:
1517            if expression.meta.get("quoted_column"):
1518                # If a column reference is of the form `dataset.table`.name, we need
1519                # to preserve the quoted table path, otherwise the reference breaks
1520                table_parts = ".".join(p.name for p in expression.parts[:-1])
1521                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
1522                return f"{table_path}.{self.sql(expression, 'this')}"
1523
1524            return super().column_parts(expression)
def table_parts(self, expression: exp.Table) -> str:
    # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
    # we need to make sure the correct quoting is used in each case.
    #
    # For example, if there is a CTE x that clashes with a schema name, then the former will
    # return the table y in that schema, whereas the latter will return the CTE's y column:
    #
    # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
    # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
    if not expression.meta.get("quoted_table"):
        return super().table_parts(expression)

    joined_path = ".".join(part.name for part in expression.parts)
    return self.sql(exp.Identifier(this=joined_path, quoted=True))
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    """Render time formatting via the FORMAT_* family of functions."""
    this = expression.this

    # Pick the FORMAT_* variant matching the operand's temporal type; plain
    # dates fall through to FORMAT_DATE. Order mirrors the original checks.
    func_name = "FORMAT_DATE"
    for klass, candidate in (
        (exp.TsOrDsToDatetime, "FORMAT_DATETIME"),
        (exp.TsOrDsToTimestamp, "FORMAT_TIMESTAMP"),
        (exp.TsOrDsToTime, "FORMAT_TIME"),
    ):
        if isinstance(this, klass):
            func_name = candidate
            break

    # For TS_OR_DS wrapper nodes, format their inner value instead of the wrapper.
    time_expr = this if isinstance(this, self.TS_OR_DS_TYPES) else expression
    return self.func(
        func_name, self.format_time(expression), time_expr.this, expression.args.get("zone")
    )
def eq_sql(self, expression: exp.EQ) -> str:
    # Operands of = cannot be NULL in BigQuery, so a comparison against NULL
    # collapses to NULL — except inside UPDATE ... SET, where `col = NULL` is
    # an assignment and must be kept as-is.
    has_null_operand = isinstance(expression.left, exp.Null) or isinstance(
        expression.right, exp.Null
    )
    if has_null_operand and not isinstance(expression.parent, exp.Update):
        return "NULL"

    return self.binary(expression, "=")
def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
    # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
    parent = expression.parent
    if isinstance(parent, exp.Cast) and parent.to.is_type("text"):
        return super().attimezone_sql(expression)

    # Otherwise emit TIMESTAMP(DATETIME(<expr>, <zone>)) to apply the zone shift.
    zoned = self.func("DATETIME", expression.this, expression.args.get("zone"))
    return self.func("TIMESTAMP", zoned)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST as BigQuery's SAFE_CAST."""
    return self.cast_sql(expression, safe_prefix="SAFE_")
def bracket_sql(self, expression: exp.Bracket) -> str:
    """Render subscript access, handling struct fields and array positioning."""
    this = expression.this
    expressions = expression.expressions

    if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
        key = expressions[0]
        if key.type is None:
            from sqlglot.optimizer.annotate_types import annotate_types

            key = annotate_types(key, dialect=self.dialect)

        if key.type and key.type.this in exp.DataType.TEXT_TYPES:
            # BQ doesn't support bracket syntax with string values for structs
            return f"{self.sql(this)}.{key.name}"

    inner_sql = self.expressions(expression, flat=True)
    offset = expression.args.get("offset")

    # Map the parsed offset convention onto BigQuery's positioning wrappers:
    # 0-based -> OFFSET(...), 1-based -> ORDINAL(...).
    if offset is not None:
        if offset == 0:
            inner_sql = f"OFFSET({inner_sql})"
        elif offset == 1:
            inner_sql = f"ORDINAL({inner_sql})"
        else:
            self.unsupported(f"Unsupported array offset: {offset}")

    if expression.args.get("safe"):
        inner_sql = f"SAFE_{inner_sql}"

    return f"{self.sql(this)}[{inner_sql}]"
def in_unnest_op(self, expression: exp.Unnest) -> str:
    """Render an `IN UNNEST(...)` operand without any extra wrapping."""
    return self.sql(expression)
def version_sql(self, expression: exp.Version) -> str:
    # BigQuery spells time travel as FOR SYSTEM_TIME AS OF, so rewrite the
    # generic TIMESTAMP keyword before delegating to the base renderer.
    if expression.name == "TIMESTAMP":
        expression.set("this", "SYSTEM_TIME")

    return super().version_sql(expression)
def contains_sql(self, expression: exp.Contains) -> str:
    """Render CONTAINS as BigQuery's CONTAINS_SUBSTR."""
    haystack = expression.this
    needle = expression.expression

    # CONTAINS_SUBSTR performs a case-insensitive match, so LOWER() applied to
    # both sides is redundant and can be peeled off.
    if isinstance(haystack, exp.Lower) and isinstance(needle, exp.Lower):
        haystack = haystack.this
        needle = needle.this

    return self.func("CONTAINS_SUBSTR", haystack, needle, expression.args.get("json_scope"))
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render CAST, special-casing inline type-annotated ARRAY literals."""
    this = expression.this

    # This ensures that inline type-annotated ARRAY literals like ARRAY<INT64>[1, 2, 3]
    # are roundtripped unaffected. The inner check excludes ARRAY(SELECT ...) expressions,
    # because they aren't literals and so the above syntax is invalid BigQuery.
    if isinstance(this, exp.Array):
        first_elem = seq_get(this.expressions, 0)
        if not first_elem or not first_elem.find(exp.Query):
            return f"{self.sql(expression, 'to')}{self.sql(this)}"

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def declareitem_sql(self, expression: exp.DeclareItem) -> str:
    """Render a DECLARE item as `<vars> [<type>] [DEFAULT <expr>]`."""
    pieces = [self.expressions(expression, "this")]

    kind = self.sql(expression, "kind")
    if kind:
        pieces.append(f" {kind}")

    default = self.sql(expression, "default")
    if default:
        pieces.append(f" DEFAULT {default}")

    return "".join(pieces)
# Generator feature flags. NOTE(review): presumably BigQuery has no TRY
# expression, no UESCAPE clause on Unicode strings, and no DECODE(...)
# CASE shorthand — confirm against the base Generator's flag docs.
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
SUPPORTS_DECODE_CASE = False
Inherited Members
sqlglot.generator.Generator
Generator
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
AGGREGATE_FILTER_SUPPORTED
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
SUPPORTS_TABLE_COPY
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
INSERT_OVERWRITE
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
SUPPORTS_WINDOW_EXCLUDE
SET_OP_MODIFIERS
COPY_PARAMS_ARE_WRAPPED
COPY_PARAMS_EQ_REQUIRED
COPY_HAS_INTO_KEYWORD
STAR_EXCEPT
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
ARRAY_CONCAT_IS_VAR_LEN
SUPPORTS_CONVERT_TIMEZONE
SUPPORTS_MEDIAN
ALTER_SET_WRAPPED
NORMALIZE_EXTRACT_DATE_PARTS
PARSE_JSON_NAME
ARRAY_SIZE_NAME
ALTER_SET_TYPE
ARRAY_SIZE_DIM_REQUIRED
SUPPORTS_BETWEEN_FLAGS
SUPPORTS_LIKE_QUANTIFIERS
UNSUPPORTED_TYPES
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
sanitize_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
describe_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
limitoptions_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
with_properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_sql
tablefromrows_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
queryband_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
for_modifiers
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
unnest_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
formatphrase_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterindex_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alterset_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
addpartition_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
safedivide_sql
overlaps_sql
distance_sql
dot_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
is_sql
like_sql
ilike_sql
similarto_sql
lt_sql
lte_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
jsoncast_sql
try_sql
log_sql
use_sql
binary
ceil_floor
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
whens_sql
merge_sql
tochar_sql
tonumber_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
uniquekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
generateembedding_sql
featuresattime_sql
vectorsearch_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodatetime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
struct_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonextractquote_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
revoke_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql
attach_sql
detach_sql
attachoption_sql
watermarkcolumnconstraint_sql
encodeproperty_sql
includeproperty_sql
xmlelement_sql
xmlkeyvalueoption_sql
partitionbyrangeproperty_sql
partitionbyrangepropertydynamic_sql
unpivotcolumns_sql
analyzesample_sql
analyzestatistics_sql
analyzehistogram_sql
analyzedelete_sql
analyzelistchainedrows_sql
analyzevalidate_sql
analyze_sql
xmltable_sql
xmlnamespace_sql
export_sql
declare_sql
recursivewithsearch_sql
parameterizedagg_sql
anonymousaggfunc_sql
combinedaggfunc_sql
combinedparameterizedagg_sql
show_sql
get_put_sql
translatecharacters_sql
decodecase_sql
semanticview_sql
getextract_sql
datefromunixdate_sql
space_sql
buildproperty_sql
refreshtriggerproperty_sql