Edit on GitHub

sqlglot.dialects.bigquery

  1from __future__ import annotations
  2
  3import logging
  4import re
  5import typing as t
  6
  7from sqlglot import exp, generator, parser, tokens, transforms
  8from sqlglot.dialects.dialect import (
  9    Dialect,
 10    NormalizationStrategy,
 11    arg_max_or_min_no_count,
 12    binary_from_function,
 13    date_add_interval_sql,
 14    datestrtodate_sql,
 15    build_formatted_time,
 16    filter_array_using_unnest,
 17    if_sql,
 18    inline_array_unless_query,
 19    max_or_greatest,
 20    min_or_least,
 21    no_ilike_sql,
 22    build_date_delta_with_interval,
 23    regexp_replace_sql,
 24    rename_func,
 25    sha256_sql,
 26    timestrtotime_sql,
 27    ts_or_ds_add_cast,
 28    unit_to_var,
 29)
 30from sqlglot.helper import seq_get, split_num_words
 31from sqlglot.tokens import TokenType
 32
 33if t.TYPE_CHECKING:
 34    from sqlglot._typing import E, Lit
 35
 36logger = logging.getLogger("sqlglot")
 37
 38
def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
    """Transpile a VALUES clause used as a derived table into UNNEST([STRUCT(...), ...]).

    BigQuery has no `(VALUES ...) AS t(c1, c2)` derived-table syntax. A VALUES node
    that is not inside a FROM/JOIN (e.g. in an INSERT) is generated normally.
    """
    if not expression.find_ancestor(exp.From, exp.Join):
        return self.values_sql(expression)

    structs = []
    alias = expression.args.get("alias")
    for tup in expression.find_all(exp.Tuple):
        # Use the alias column names when given, otherwise synthesize _c0, _c1, ...
        field_aliases = (
            alias.columns
            if alias and alias.columns
            else (f"_c{i}" for i in range(len(tup.expressions)))
        )
        # Each VALUES row becomes STRUCT(v0 AS name0, v1 AS name1, ...)
        expressions = [
            exp.PropertyEQ(this=exp.to_identifier(name), expression=fld)
            for name, fld in zip(field_aliases, tup.expressions)
        ]
        structs.append(exp.Struct(expressions=expressions))

    # Due to `UNNEST_COLUMN_ONLY`, it is expected that the table alias be contained in the columns expression
    alias_name_only = exp.TableAlias(columns=[alias.this]) if alias else None
    return self.unnest_sql(
        exp.Unnest(expressions=[exp.array(*structs, copy=False)], alias=alias_name_only)
    )
 62
 63
 64def _returnsproperty_sql(self: BigQuery.Generator, expression: exp.ReturnsProperty) -> str:
 65    this = expression.this
 66    if isinstance(this, exp.Schema):
 67        this = f"{self.sql(this, 'this')} <{self.expressions(this)}>"
 68    else:
 69        this = self.sql(this)
 70    return f"RETURNS {this}"
 71
 72
 73def _create_sql(self: BigQuery.Generator, expression: exp.Create) -> str:
 74    returns = expression.find(exp.ReturnsProperty)
 75    if expression.kind == "FUNCTION" and returns and returns.args.get("is_table"):
 76        expression.set("kind", "TABLE FUNCTION")
 77
 78        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
 79            expression.set("expression", expression.expression.this)
 80
 81    return self.create_sql(expression)
 82
 83
 84# https://issuetracker.google.com/issues/162294746
 85# workaround for bigquery bug when grouping by an expression and then ordering
 86# WITH x AS (SELECT 1 y)
 87# SELECT y + 1 z
 88# FROM x
 89# GROUP BY x + 1
 90# ORDER by z
 91def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
 92    if isinstance(expression, exp.Select):
 93        group = expression.args.get("group")
 94        order = expression.args.get("order")
 95
 96        if group and order:
 97            aliases = {
 98                select.this: select.args["alias"]
 99                for select in expression.selects
100                if isinstance(select, exp.Alias)
101            }
102
103            for grouped in group.expressions:
104                if grouped.is_int:
105                    continue
106                alias = aliases.get(grouped)
107                if alias:
108                    grouped.replace(exp.column(alias))
109
110    return expression
111
112
def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        # A star projection can't be matched to alias columns without expanding it first
        if cte_query.is_star:
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        # Drop the alias columns from the CTE; they're re-applied to each projection below
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            # Keep a handle on the original node so the replacement lands in its slot
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression
138
139
def _build_parse_timestamp(args: t.List) -> exp.StrToTime:
    """Build StrToTime from PARSE_TIMESTAMP(format, value[, zone]) arguments."""
    # PARSE_TIMESTAMP takes (format, value) but StrToTime expects (value, format)
    swapped = [seq_get(args, 1), seq_get(args, 0)]
    node = build_formatted_time(exp.StrToTime, "bigquery")(swapped)
    node.set("zone", seq_get(args, 2))
    return node
144
145
def _build_timestamp(args: t.List) -> exp.Timestamp:
    """Build a Timestamp node flagged as timezone-aware for BigQuery's TIMESTAMP()."""
    node = exp.Timestamp.from_arg_list(args)
    node.set("with_tz", True)
    return node
150
151
def _build_date(args: t.List) -> exp.Date | exp.DateFromParts:
    """DATE(y, m, d) maps to DateFromParts; any other arity maps to Date."""
    if len(args) == 3:
        return exp.DateFromParts.from_arg_list(args)
    return exp.Date.from_arg_list(args)
155
156
def _build_to_hex(args: t.List) -> exp.LowerHex | exp.MD5:
    """Parse TO_HEX(...), folding TO_HEX(MD5(x)) into a single MD5 node.

    TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its
    transpilation. Otherwise the argument is wrapped in LowerHex, since BigQuery's
    TO_HEX produces lowercase hex digits.

    Note: the return annotation previously claimed ``exp.Hex``, but the function
    actually returns ``exp.LowerHex`` — fixed to match the implementation.
    """
    arg = seq_get(args, 0)
    return exp.MD5(this=arg.this) if isinstance(arg, exp.MD5Digest) else exp.LowerHex(this=arg)
161
162
def _array_contains_sql(self: BigQuery.Generator, expression: exp.ArrayContains) -> str:
    """Render ArrayContains as EXISTS(SELECT 1 FROM UNNEST(arr) _unnest(_col) WHERE _col = value)."""
    unnested = exp.Unnest(expressions=[expression.left]).as_("_unnest", table=["_col"])
    predicate = exp.column("_col").eq(expression.right)
    subquery = exp.select("1").from_(unnested).where(predicate)
    return self.sql(exp.Exists(this=subquery))
171
172
def _ts_or_ds_add_sql(self: BigQuery.Generator, expression: exp.TsOrDsAdd) -> str:
    """Render TsOrDsAdd as a DATE_ADD after applying the TsOrDs cast."""
    casted = ts_or_ds_add_cast(expression)
    return date_add_interval_sql("DATE", "ADD")(self, casted)
175
176
def _ts_or_ds_diff_sql(self: BigQuery.Generator, expression: exp.TsOrDsDiff) -> str:
    """Render TsOrDsDiff as DATE_DIFF, casting both operands to TIMESTAMP in place.

    NOTE: the casts mutate the expression tree via ``replace``, so the subsequent
    ``expression.this`` / ``expression.expression`` accesses see the cast nodes.
    """
    expression.this.replace(exp.cast(expression.this, exp.DataType.Type.TIMESTAMP))
    expression.expression.replace(exp.cast(expression.expression, exp.DataType.Type.TIMESTAMP))
    unit = unit_to_var(expression)
    return self.func("DATE_DIFF", expression.this, expression.expression, unit)
182
183
def _unix_to_time_sql(self: BigQuery.Generator, expression: exp.UnixToTime) -> str:
    """Render UnixToTime via TIMESTAMP_SECONDS/MILLIS/MICROS for the known scales.

    Any other scale is normalized to seconds first by dividing by POW(10, scale).
    """
    scale = expression.args.get("scale")
    timestamp = expression.this

    if scale == exp.UnixToTime.MILLIS:
        return self.func("TIMESTAMP_MILLIS", timestamp)
    if scale == exp.UnixToTime.MICROS:
        return self.func("TIMESTAMP_MICROS", timestamp)
    if scale is None or scale == exp.UnixToTime.SECONDS:
        return self.func("TIMESTAMP_SECONDS", timestamp)

    divided = exp.Div(this=timestamp, expression=exp.func("POW", 10, scale))
    return self.func("TIMESTAMP_SECONDS", exp.cast(divided, exp.DataType.Type.BIGINT))
199
200
def _build_time(args: t.List) -> exp.Func:
    """TIME() dispatch: 1 arg -> TsOrDsToTime, 2 args -> Time, else TimeFromParts."""
    arg_count = len(args)
    if arg_count == 1:
        return exp.TsOrDsToTime(this=args[0])
    if arg_count == 2:
        return exp.Time.from_arg_list(args)
    return exp.TimeFromParts.from_arg_list(args)
207
208
def _build_datetime(args: t.List) -> exp.Func:
    """DATETIME() dispatch: 1 arg -> TsOrDsToTimestamp, 2 -> Datetime, else TimestampFromParts."""
    arg_count = len(args)
    if arg_count == 1:
        return exp.TsOrDsToTimestamp.from_arg_list(args)
    return (
        exp.Datetime.from_arg_list(args)
        if arg_count == 2
        else exp.TimestampFromParts.from_arg_list(args)
    )
215
216
def _str_to_datetime_sql(
    self: BigQuery.Generator, expression: exp.StrToDate | exp.StrToTime
) -> str:
    """Render StrToDate/StrToTime as PARSE_DATE/PARSE_TIMESTAMP, or SAFE_CAST when safe."""
    this = self.sql(expression, "this")
    dtype = "DATE" if isinstance(expression, exp.StrToDate) else "TIMESTAMP"

    if not expression.args.get("safe"):
        fmt = self.format_time(expression)
        return self.func(f"PARSE_{dtype}", fmt, this, expression.args.get("zone"))

    # SAFE_CAST ... FORMAT needs the format back in the dialect's own format
    # elements, hence the inverse mapping from the canonical representation
    fmt = self.format_time(
        expression,
        self.dialect.INVERSE_FORMAT_MAPPING,
        self.dialect.INVERSE_FORMAT_TRIE,
    )
    return f"SAFE_CAST({this} AS {dtype} FORMAT {fmt})"
233
234
class BigQuery(Dialect):
    """Dialect definition for Google BigQuery (GoogleSQL)."""

    WEEK_OFFSET = -1
    # UNNEST's alias refers to the produced column rather than a table
    # (see the table-alias handling in _derived_table_values_to_unnest)
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    # LOG's arguments are value-first: LOG(value, base)
    LOG_BASE_FIRST = False
    # BigQuery's TO_HEX emits lowercase hex digits
    HEX_LOWERCASE = True
    FORCE_EARLY_ALIAS_REF_EXPANSION = True
    EXPAND_ALIAS_REFS_EARLY_ONLY_IN_GROUP_BY = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
    TIME_MAPPING = {
        "%D": "%m/%d/%y",
        "%E6S": "%S.%f",
        "%e": "%-d",
    }

    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    # All set operations require either a DISTINCT or ALL specifier
    SET_OP_DISTINCT_BY_DEFAULT = dict.fromkeys((exp.Except, exp.Intersect, exp.Union), None)
280
    def normalize_identifier(self, expression: E) -> E:
        """Lowercase identifiers that BigQuery treats case-insensitively.

        UDF and (qualified) table identifiers keep their original casing, since
        those are case-sensitive in BigQuery by default.
        """
        if (
            isinstance(expression, exp.Identifier)
            and self.normalization_strategy is not NormalizationStrategy.CASE_SENSITIVE
        ):
            parent = expression.parent
            # Walk out of dotted references to find the enclosing container node
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
            # by default. The following check uses a heuristic to detect tables based on whether
            # they are qualified. This should generally be correct, because tables in BigQuery
            # must be qualified with at least a dataset, unless @@dataset_id is set.
            case_sensitive = (
                isinstance(parent, exp.UserDefinedFunction)
                or (
                    isinstance(parent, exp.Table)
                    and parent.db
                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
                )
                or expression.meta.get("is_table")
            )
            if not case_sensitive:
                expression.set("this", expression.this.lower())

        return expression
307
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer configured for BigQuery's lexical rules."""

        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B"..." (for every quote style) are byte strings
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R"..." (for every quote style) are raw strings
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BYTEINT": TokenType.INT,
            "BYTES": TokenType.BINARY,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "DATETIME": TokenType.TIMESTAMP,
            "DECLARE": TokenType.COMMAND,
            "ELSEIF": TokenType.COMMAND,
            "EXCEPTION": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "MODEL": TokenType.MODEL,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
        }
        # These base-tokenizer entries are not tokenized as keywords in this dialect
        KEYWORDS.pop("DIV")
        KEYWORDS.pop("VALUES")
        KEYWORDS.pop("/*+")
346
    class Parser(parser.Parser):
        """Parser overrides for BigQuery-specific syntax."""

        PREFIXED_PIVOT_COLUMNS = True
        LOG_DEFAULTS_TO_LN = True
        SUPPORTS_IMPLICIT_UNNEST = True

        # Maps BigQuery function names to canonical sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _build_date,
            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME": _build_datetime,
            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            # FORMAT_DATE takes (format, value); TimeToStr stores (value, format)
            "FORMAT_DATE": lambda args: exp.TimeToStr(
                this=exp.TsOrDsToDate(this=seq_get(args, 1)), format=seq_get(args, 0)
            ),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "JSON_EXTRACT_SCALAR": lambda args: exp.JSONExtractScalar(
                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string("$")
            ),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _build_to_hex,
            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _build_parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME": _build_time,
            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP": _build_timestamp,
            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
            ),
            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
            "FORMAT_DATETIME": lambda args: exp.TimeToStr(
                this=exp.TsOrDsToTimestamp(this=seq_get(args, 1)), format=seq_get(args, 0)
            ),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # TRIM is parsed as a regular function in BigQuery
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
            TokenType.END: lambda self: self._parse_as_command(self._prev),
            TokenType.FOR: lambda self: self._parse_for_in(),
        }

        # Subscript operator name -> (index offset, SAFE_ variant flag)
        BRACKET_OFFSETS = {
            "OFFSET": (0, False),
            "ORDINAL": (1, False),
            "SAFE_OFFSET": (0, True),
            "SAFE_ORDINAL": (1, True),
        }

        def _parse_for_in(self) -> exp.ForIn:
            """Parse `FOR <expr> DO <statement>` into a ForIn node."""
            this = self._parse_range()
            self._match_text_seq("DO")
            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one table-name part, allowing dashes and leading numbers."""
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Consume dash-separated pieces (e.g. my-project) up to the next dot
                while self._match(TokenType.DASH, advance=False) and self._next:
                    text = ""
                    while self._curr and self._curr.token_type != TokenType.DOT:
                        self._advance()
                        text += self._prev.text
                    table_name += text

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                # A number immediately followed by a var (e.g. 1st_part) is one identifier
                if self._is_connected() and self._parse_var(any_token=True):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a possibly-quoted, possibly dotted BigQuery table reference."""
            table = super()._parse_table_parts(
                schema=schema, is_db_reference=is_db_reference, wildcard=True
            )

            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
            if not table.catalog:
                if table.db:
                    parts = table.db.split(".")
                    if len(parts) == 2 and not table.args["db"].quoted:
                        table.set("catalog", exp.Identifier(this=parts[0]))
                        table.set("db", exp.Identifier(this=parts[1]))
                else:
                    parts = table.name.split(".")
                    if len(parts) == 2 and not table.this.quoted:
                        table.set("db", exp.Identifier(this=parts[0]))
                        table.set("this", exp.Identifier(this=parts[1]))

            # A quoted name like `proj.db.tbl` arrives as one part containing dots;
            # redistribute it into catalog/db/this
            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
                catalog, db, this, *rest = (
                    exp.to_identifier(p, quoted=True)
                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build([this, *rest])  # type: ignore

                table = exp.Table(
                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
                )
                table.meta["quoted_table"] = True

            return table

        def _parse_column(self) -> t.Optional[exp.Expression]:
            """Parse a column, redistributing dots inside quoted multi-part names."""
            column = super()._parse_column()
            if isinstance(column, exp.Column):
                parts = column.parts
                if any("." in p.name for p in parts):
                    catalog, db, table, this, *rest = (
                        exp.to_identifier(p, quoted=True)
                        for p in split_num_words(".".join(p.name for p in parts), ".", 4)
                    )

                    if rest and this:
                        this = exp.Dot.build([this, *rest])  # type: ignore

                    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
                    column.meta["quoted_column"] = True

            return column

        @t.overload
        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...

        @t.overload
        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...

        def _parse_json_object(self, agg=False):
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object

        def _parse_bracket(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Parse subscripts, normalizing OFFSET/ORDINAL/SAFE_* accessors."""
            bracket = super()._parse_bracket(this)

            if this is bracket:
                return bracket

            if isinstance(bracket, exp.Bracket):
                for expression in bracket.expressions:
                    name = expression.name.upper()

                    if name not in self.BRACKET_OFFSETS:
                        break

                    offset, safe = self.BRACKET_OFFSETS[name]
                    bracket.set("offset", offset)
                    bracket.set("safe", safe)
                    # Unwrap e.g. OFFSET(i) down to the plain index expression i
                    expression.replace(expression.expressions[0])

            return bracket

        def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
            """Parse UNNEST, flagging arrays of structs for explosion semantics."""
            unnest = super()._parse_unnest(with_alias=with_alias)

            if not unnest:
                return None

            unnest_expr = seq_get(unnest.expressions, 0)
            if unnest_expr:
                from sqlglot.optimizer.annotate_types import annotate_types

                unnest_expr = annotate_types(unnest_expr)

                # Unnesting a nested array (i.e array of structs) explodes the top-level struct fields,
                # in contrast to other dialects such as DuckDB which flattens only the array by default
                if unnest_expr.is_type(exp.DataType.Type.ARRAY) and any(
                    array_elem.is_type(exp.DataType.Type.STRUCT)
                    for array_elem in unnest_expr._type.expressions
                ):
                    unnest.set("explode_array", True)

            return unnest
614
    class Generator(generator.Generator):
        """SQL generator overrides targeting BigQuery syntax."""

        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        NVL2_SUPPORTED = False
        UNNEST_WITH_ORDINALITY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        SUPPORTS_TABLE_ALIAS_COLUMNS = False
        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
        JSON_KEY_VALUE_PAIR_SEP = ","
        NULL_ORDERING_SUPPORTED = False
        IGNORE_NULLS_IN_FUNC = True
        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
        CAN_IMPLEMENT_ARRAY_ANY = True
        SUPPORTS_TO_NUMBER = False
        NAMED_PLACEHOLDER_TOKEN = "@"
        HEX_FUNC = "TO_HEX"
        WITH_PROPERTIES_PREFIX = "OPTIONS"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False

        # Maps canonical sqlglot expressions to BigQuery SQL renderers
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
            exp.Array: inline_array_unless_query,
            exp.ArrayContains: _array_contains_sql,
            exp.ArrayFilter: filter_array_using_unnest,
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.CollateProperty: lambda self, e: (
                f"DEFAULT COLLATE {self.sql(e, 'this')}"
                if e.args.get("default")
                else f"COLLATE {self.sql(e, 'this')}"
            ),
            exp.Commit: lambda *_: "COMMIT TRANSACTION",
            exp.CountIf: rename_func("COUNTIF"),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: self.func(
                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
            ),
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.FromTimeZone: lambda self, e: self.func(
                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
            ),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.If: if_sql(false_value="NULL"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest(),
                    transforms.unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.SHA2: sha256_sql,
            exp.StabilityProperty: lambda self, e: (
                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
            ),
            exp.StrToDate: _str_to_datetime_sql,
            exp.StrToTime: _str_to_datetime_sql,
            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
            exp.TimeFromParts: rename_func("TIME"),
            exp.TimestampFromParts: rename_func("DATETIME"),
            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
            exp.TsOrDsAdd: _ts_or_ds_add_sql,
            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
            exp.TsOrDsToTime: rename_func("TIME"),
            exp.TsOrDsToTimestamp: rename_func("DATETIME"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.UnixDate: rename_func("UNIX_DATE"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Maps canonical data types to their BigQuery names
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.ROWVERSION: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # WINDOW comes after QUALIFY
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
        AFTER_HAVING_MODIFIER_TRANSFORMS = {
            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
        }
774
775        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
776        RESERVED_KEYWORDS = {
777            "all",
778            "and",
779            "any",
780            "array",
781            "as",
782            "asc",
783            "assert_rows_modified",
784            "at",
785            "between",
786            "by",
787            "case",
788            "cast",
789            "collate",
790            "contains",
791            "create",
792            "cross",
793            "cube",
794            "current",
795            "default",
796            "define",
797            "desc",
798            "distinct",
799            "else",
800            "end",
801            "enum",
802            "escape",
803            "except",
804            "exclude",
805            "exists",
806            "extract",
807            "false",
808            "fetch",
809            "following",
810            "for",
811            "from",
812            "full",
813            "group",
814            "grouping",
815            "groups",
816            "hash",
817            "having",
818            "if",
819            "ignore",
820            "in",
821            "inner",
822            "intersect",
823            "interval",
824            "into",
825            "is",
826            "join",
827            "lateral",
828            "left",
829            "like",
830            "limit",
831            "lookup",
832            "merge",
833            "natural",
834            "new",
835            "no",
836            "not",
837            "null",
838            "nulls",
839            "of",
840            "on",
841            "or",
842            "order",
843            "outer",
844            "over",
845            "partition",
846            "preceding",
847            "proto",
848            "qualify",
849            "range",
850            "recursive",
851            "respect",
852            "right",
853            "rollup",
854            "rows",
855            "select",
856            "set",
857            "some",
858            "struct",
859            "tablesample",
860            "then",
861            "to",
862            "treat",
863            "true",
864            "unbounded",
865            "union",
866            "unnest",
867            "using",
868            "when",
869            "where",
870            "window",
871            "with",
872            "within",
873        }
874
875        def mod_sql(self, expression: exp.Mod) -> str:
876            this = expression.this
877            expr = expression.expression
878            return self.func(
879                "MOD",
880                this.unnest() if isinstance(this, exp.Paren) else this,
881                expr.unnest() if isinstance(expr, exp.Paren) else expr,
882            )
883
884        def column_parts(self, expression: exp.Column) -> str:
885            if expression.meta.get("quoted_column"):
886                # If a column reference is of the form `dataset.table`.name, we need
887                # to preserve the quoted table path, otherwise the reference breaks
888                table_parts = ".".join(p.name for p in expression.parts[:-1])
889                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
890                return f"{table_path}.{self.sql(expression, 'this')}"
891
892            return super().column_parts(expression)
893
894        def table_parts(self, expression: exp.Table) -> str:
895            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
896            # we need to make sure the correct quoting is used in each case.
897            #
898            # For example, if there is a CTE x that clashes with a schema name, then the former will
899            # return the table y in that schema, whereas the latter will return the CTE's y column:
900            #
901            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
902            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
903            if expression.meta.get("quoted_table"):
904                table_parts = ".".join(p.name for p in expression.parts)
905                return self.sql(exp.Identifier(this=table_parts, quoted=True))
906
907            return super().table_parts(expression)
908
909        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
910            if isinstance(expression.this, exp.TsOrDsToTimestamp):
911                func_name = "FORMAT_DATETIME"
912            else:
913                func_name = "FORMAT_DATE"
914            this = (
915                expression.this
916                if isinstance(expression.this, (exp.TsOrDsToTimestamp, exp.TsOrDsToDate))
917                else expression
918            )
919            return self.func(func_name, self.format_time(expression), this.this)
920
921        def eq_sql(self, expression: exp.EQ) -> str:
922            # Operands of = cannot be NULL in BigQuery
923            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
924                if not isinstance(expression.parent, exp.Update):
925                    return "NULL"
926
927            return self.binary(expression, "=")
928
929        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
930            parent = expression.parent
931
932            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
933            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
934            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
935                return self.func(
936                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
937                )
938
939            return super().attimezone_sql(expression)
940
        def trycast_sql(self, expression: exp.TryCast) -> str:
            # TRY_CAST is spelled SAFE_CAST in BigQuery; reuse the regular cast renderer.
            return self.cast_sql(expression, safe_prefix="SAFE_")
943
944        def bracket_sql(self, expression: exp.Bracket) -> str:
945            this = expression.this
946            expressions = expression.expressions
947
948            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
949                arg = expressions[0]
950                if arg.type is None:
951                    from sqlglot.optimizer.annotate_types import annotate_types
952
953                    arg = annotate_types(arg)
954
955                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
956                    # BQ doesn't support bracket syntax with string values for structs
957                    return f"{self.sql(this)}.{arg.name}"
958
959            expressions_sql = self.expressions(expression, flat=True)
960            offset = expression.args.get("offset")
961
962            if offset == 0:
963                expressions_sql = f"OFFSET({expressions_sql})"
964            elif offset == 1:
965                expressions_sql = f"ORDINAL({expressions_sql})"
966            elif offset is not None:
967                self.unsupported(f"Unsupported array offset: {offset}")
968
969            if expression.args.get("safe"):
970                expressions_sql = f"SAFE_{expressions_sql}"
971
972            return f"{self.sql(this)}[{expressions_sql}]"
973
        def in_unnest_op(self, expression: exp.Unnest) -> str:
            # BigQuery supports `x IN UNNEST(arr)` directly, so no extra wrapping is needed.
            return self.sql(expression)
976
        def version_sql(self, expression: exp.Version) -> str:
            # BigQuery spells time travel as FOR SYSTEM_TIME AS OF, so rewrite the
            # generic "TIMESTAMP" version kind in place before delegating.
            if expression.name == "TIMESTAMP":
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
logger = <Logger sqlglot (WARNING)>
class BigQuery(sqlglot.dialects.dialect.Dialect):
236class BigQuery(Dialect):
    # Dialect-level behavior switches for BigQuery.
    WEEK_OFFSET = -1
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    LOG_BASE_FIRST = False
    HEX_LOWERCASE = True
    FORCE_EARLY_ALIAS_REF_EXPANSION = True
    EXPAND_ALIAS_REFS_EARLY_ONLY_IN_GROUP_BY = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
    TIME_MAPPING = {
        "%D": "%m/%d/%y",
        "%E6S": "%S.%f",
        "%e": "%-d",
    }

    # Oracle/Teradata-style format tokens -> strftime-style equivalents.
    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    # All set operations require either a DISTINCT or ALL specifier
    SET_OP_DISTINCT_BY_DEFAULT = dict.fromkeys((exp.Except, exp.Intersect, exp.Union), None)
281
282    def normalize_identifier(self, expression: E) -> E:
283        if (
284            isinstance(expression, exp.Identifier)
285            and self.normalization_strategy is not NormalizationStrategy.CASE_SENSITIVE
286        ):
287            parent = expression.parent
288            while isinstance(parent, exp.Dot):
289                parent = parent.parent
290
291            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
292            # by default. The following check uses a heuristic to detect tables based on whether
293            # they are qualified. This should generally be correct, because tables in BigQuery
294            # must be qualified with at least a dataset, unless @@dataset_id is set.
295            case_sensitive = (
296                isinstance(parent, exp.UserDefinedFunction)
297                or (
298                    isinstance(parent, exp.Table)
299                    and parent.db
300                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
301                )
302                or expression.meta.get("is_table")
303            )
304            if not case_sensitive:
305                expression.set("this", expression.this.lower())
306
307        return expression
308
    class Tokenizer(tokens.Tokenizer):
        """BigQuery lexer settings: backtick identifiers, #-style comments, triple-quoted
        strings, and b/r string prefixes."""

        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B'...' byte literals for every supported quote style
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R'...' raw strings (escape sequences left untouched)
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        # BigQuery-specific keyword -> token-type overrides on top of the base set.
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BYTEINT": TokenType.INT,
            "BYTES": TokenType.BINARY,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "DATETIME": TokenType.TIMESTAMP,
            "DECLARE": TokenType.COMMAND,
            "ELSEIF": TokenType.COMMAND,
            "EXCEPTION": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "MODEL": TokenType.MODEL,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
        }
        # These base-tokenizer keywords don't apply to BigQuery and must not be tokenized
        # as keywords here.
        KEYWORDS.pop("DIV")
        KEYWORDS.pop("VALUES")
        KEYWORDS.pop("/*+")
347
    class Parser(parser.Parser):
        """BigQuery-specific parsing rules layered on the base parser."""

        PREFIXED_PIVOT_COLUMNS = True
        LOG_DEFAULTS_TO_LN = True
        SUPPORTS_IMPLICIT_UNNEST = True

        # Function-name -> builder overrides mapping BigQuery spellings onto
        # sqlglot's canonical expression nodes.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _build_date,
            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME": _build_datetime,
            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            # FORMAT_DATE(fmt, value): note the format comes first in BigQuery
            "FORMAT_DATE": lambda args: exp.TimeToStr(
                this=exp.TsOrDsToDate(this=seq_get(args, 1)), format=seq_get(args, 0)
            ),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            # Missing path argument defaults to the root, "$"
            "JSON_EXTRACT_SCALAR": lambda args: exp.JSONExtractScalar(
                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string("$")
            ),
            # BigQuery's LENGTH counts bytes for BYTES inputs, hence binary=True
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _build_to_hex,
            # PARSE_DATE(fmt, value) -> StrToDate(value, fmt): swap the argument order
            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _build_parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            # group=1 is implied when the pattern has exactly one capture group.
            # NOTE(review): re.compile(args[1].name) assumes the pattern argument is a
            # string literal — a non-literal pattern would compile its node name; verify.
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME": _build_time,
            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP": _build_timestamp,
            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
            ),
            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
            # FORMAT_DATETIME(fmt, value): format-first argument order, as above
            "FORMAT_DATETIME": lambda args: exp.TimeToStr(
                this=exp.TsOrDsToTimestamp(this=seq_get(args, 1)), format=seq_get(args, 0)
            ),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            # ARRAY(<subquery>) takes a full statement, not an expression list
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
            TokenType.END: lambda self: self._parse_as_command(self._prev),
            TokenType.FOR: lambda self: self._parse_for_in(),
        }

        # Subscript wrapper name -> (index base, SAFE_ flag) used by _parse_bracket.
        BRACKET_OFFSETS = {
            "OFFSET": (0, False),
            "ORDINAL": (1, False),
            "SAFE_OFFSET": (0, True),
            "SAFE_ORDINAL": (1, True),
        }

        def _parse_for_in(self) -> exp.ForIn:
            """Parse a procedural `FOR <range> DO <statement>` loop."""
            this = self._parse_range()
            self._match_text_seq("DO")
            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one component of a table path, gluing dashed (`proj-id`) and
            number-prefixed parts back into a single identifier."""
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Consume `-...` runs up to the next dot, since dashes are legal in
                # unquoted table names.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    text = ""
                    while self._curr and self._curr.token_type != TokenType.DOT:
                        self._advance()
                        text += self._prev.text
                    table_name += text

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                # A leading number (e.g. `123abc`) was tokenized as a literal; merge it
                # with the following var token into one quoted identifier.
                table_name = this.name

                if self._is_connected() and self._parse_var(any_token=True):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a full table path, re-splitting parts that were mis-tokenized."""
            table = super()._parse_table_parts(
                schema=schema, is_db_reference=is_db_reference, wildcard=True
            )

            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
            if not table.catalog:
                if table.db:
                    parts = table.db.split(".")
                    if len(parts) == 2 and not table.args["db"].quoted:
                        table.set("catalog", exp.Identifier(this=parts[0]))
                        table.set("db", exp.Identifier(this=parts[1]))
                else:
                    parts = table.name.split(".")
                    if len(parts) == 2 and not table.this.quoted:
                        table.set("db", exp.Identifier(this=parts[0]))
                        table.set("this", exp.Identifier(this=parts[1]))

            # A dotted name inside a single quoted identifier (`proj.db.tbl`) is split
            # into at most catalog/db/name; any remainder becomes a Dot chain.
            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
                catalog, db, this, *rest = (
                    exp.to_identifier(p, quoted=True)
                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build([this, *rest])  # type: ignore

                table = exp.Table(
                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
                )
                # Mark so the generator re-emits the whole path as one quoted identifier
                table.meta["quoted_table"] = True

            return table

        def _parse_column(self) -> t.Optional[exp.Expression]:
            """Parse a column, re-splitting dotted names found inside quoted parts."""
            column = super()._parse_column()
            if isinstance(column, exp.Column):
                parts = column.parts
                if any("." in p.name for p in parts):
                    catalog, db, table, this, *rest = (
                        exp.to_identifier(p, quoted=True)
                        for p in split_num_words(".".join(p.name for p in parts), ".", 4)
                    )

                    if rest and this:
                        this = exp.Dot.build([this, *rest])  # type: ignore

                    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
                    # Mark so the generator preserves the quoted table path on output
                    column.meta["quoted_column"] = True

            return column

        @t.overload
        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...

        @t.overload
        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...

        def _parse_json_object(self, agg=False):
            """Parse JSON_OBJECT, normalizing BigQuery's two-array signature."""
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object

        def _parse_bracket(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Parse subscripts, unwrapping OFFSET/ORDINAL/SAFE_* wrappers into the
            Bracket node's `offset` and `safe` args."""
            bracket = super()._parse_bracket(this)

            if this is bracket:
                return bracket

            if isinstance(bracket, exp.Bracket):
                for expression in bracket.expressions:
                    name = expression.name.upper()

                    if name not in self.BRACKET_OFFSETS:
                        break

                    offset, safe = self.BRACKET_OFFSETS[name]
                    bracket.set("offset", offset)
                    bracket.set("safe", safe)
                    # Replace the wrapper call with its inner index expression
                    expression.replace(expression.expressions[0])

            return bracket

        def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
            """Parse UNNEST, flagging arrays of structs so transpilation can explode them."""
            unnest = super()._parse_unnest(with_alias=with_alias)

            if not unnest:
                return None

            unnest_expr = seq_get(unnest.expressions, 0)
            if unnest_expr:
                from sqlglot.optimizer.annotate_types import annotate_types

                unnest_expr = annotate_types(unnest_expr)

                # Unnesting a nested array (i.e array of structs) explodes the top-level struct fields,
                # in contrast to other dialects such as DuckDB which flattens only the array by default
                if unnest_expr.is_type(exp.DataType.Type.ARRAY) and any(
                    array_elem.is_type(exp.DataType.Type.STRUCT)
                    for array_elem in unnest_expr._type.expressions
                ):
                    unnest.set("explode_array", True)

            return unnest
615
616    class Generator(generator.Generator):
617        INTERVAL_ALLOWS_PLURAL_FORM = False
618        JOIN_HINTS = False
619        QUERY_HINTS = False
620        TABLE_HINTS = False
621        LIMIT_FETCH = "LIMIT"
622        RENAME_TABLE_WITH_DB = False
623        NVL2_SUPPORTED = False
624        UNNEST_WITH_ORDINALITY = False
625        COLLATE_IS_FUNC = True
626        LIMIT_ONLY_LITERALS = True
627        SUPPORTS_TABLE_ALIAS_COLUMNS = False
628        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
629        JSON_KEY_VALUE_PAIR_SEP = ","
630        NULL_ORDERING_SUPPORTED = False
631        IGNORE_NULLS_IN_FUNC = True
632        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
633        CAN_IMPLEMENT_ARRAY_ANY = True
634        SUPPORTS_TO_NUMBER = False
635        NAMED_PLACEHOLDER_TOKEN = "@"
636        HEX_FUNC = "TO_HEX"
637        WITH_PROPERTIES_PREFIX = "OPTIONS"
638        SUPPORTS_EXPLODING_PROJECTIONS = False
639        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
640
641        TRANSFORMS = {
642            **generator.Generator.TRANSFORMS,
643            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
644            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
645            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
646            exp.Array: inline_array_unless_query,
647            exp.ArrayContains: _array_contains_sql,
648            exp.ArrayFilter: filter_array_using_unnest,
649            exp.ArraySize: rename_func("ARRAY_LENGTH"),
650            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
651            exp.CollateProperty: lambda self, e: (
652                f"DEFAULT COLLATE {self.sql(e, 'this')}"
653                if e.args.get("default")
654                else f"COLLATE {self.sql(e, 'this')}"
655            ),
656            exp.Commit: lambda *_: "COMMIT TRANSACTION",
657            exp.CountIf: rename_func("COUNTIF"),
658            exp.Create: _create_sql,
659            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
660            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
661            exp.DateDiff: lambda self, e: self.func(
662                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
663            ),
664            exp.DateFromParts: rename_func("DATE"),
665            exp.DateStrToDate: datestrtodate_sql,
666            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
667            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
668            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
669            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
670            exp.FromTimeZone: lambda self, e: self.func(
671                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
672            ),
673            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
674            exp.GroupConcat: rename_func("STRING_AGG"),
675            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
676            exp.If: if_sql(false_value="NULL"),
677            exp.ILike: no_ilike_sql,
678            exp.IntDiv: rename_func("DIV"),
679            exp.JSONFormat: rename_func("TO_JSON_STRING"),
680            exp.Max: max_or_greatest,
681            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
682            exp.MD5Digest: rename_func("MD5"),
683            exp.Min: min_or_least,
684            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
685            exp.RegexpExtract: lambda self, e: self.func(
686                "REGEXP_EXTRACT",
687                e.this,
688                e.expression,
689                e.args.get("position"),
690                e.args.get("occurrence"),
691            ),
692            exp.RegexpReplace: regexp_replace_sql,
693            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
694            exp.ReturnsProperty: _returnsproperty_sql,
695            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
696            exp.Select: transforms.preprocess(
697                [
698                    transforms.explode_to_unnest(),
699                    transforms.unqualify_unnest,
700                    transforms.eliminate_distinct_on,
701                    _alias_ordered_group,
702                    transforms.eliminate_semi_and_anti_joins,
703                ]
704            ),
705            exp.SHA: rename_func("SHA1"),
706            exp.SHA2: sha256_sql,
707            exp.StabilityProperty: lambda self, e: (
708                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
709            ),
710            exp.StrToDate: _str_to_datetime_sql,
711            exp.StrToTime: _str_to_datetime_sql,
712            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
713            exp.TimeFromParts: rename_func("TIME"),
714            exp.TimestampFromParts: rename_func("DATETIME"),
715            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
716            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
717            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
718            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
719            exp.TimeStrToTime: timestrtotime_sql,
720            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
721            exp.TsOrDsAdd: _ts_or_ds_add_sql,
722            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
723            exp.TsOrDsToTime: rename_func("TIME"),
724            exp.TsOrDsToTimestamp: rename_func("DATETIME"),
725            exp.Unhex: rename_func("FROM_HEX"),
726            exp.UnixDate: rename_func("UNIX_DATE"),
727            exp.UnixToTime: _unix_to_time_sql,
728            exp.Values: _derived_table_values_to_unnest,
729            exp.VariancePop: rename_func("VAR_POP"),
730        }
731
        # The subset of JSON path components that BigQuery's JSON functions accept.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Maps generic sqlglot data types to their BigQuery spellings. Note that the
        # naive TIMESTAMP type is rendered as DATETIME, while the timezone-aware
        # variants (TIMESTAMPTZ/TIMESTAMPLTZ) map to BigQuery's TIMESTAMP.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.ROWVERSION: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        # Where each DDL property is rendered; BigQuery places PARTITION BY after
        # the schema and has no equivalent for VOLATILE.
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # WINDOW comes after QUALIFY
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
        AFTER_HAVING_MODIFIER_TRANSFORMS = {
            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }
875
876        def mod_sql(self, expression: exp.Mod) -> str:
877            this = expression.this
878            expr = expression.expression
879            return self.func(
880                "MOD",
881                this.unnest() if isinstance(this, exp.Paren) else this,
882                expr.unnest() if isinstance(expr, exp.Paren) else expr,
883            )
884
885        def column_parts(self, expression: exp.Column) -> str:
886            if expression.meta.get("quoted_column"):
887                # If a column reference is of the form `dataset.table`.name, we need
888                # to preserve the quoted table path, otherwise the reference breaks
889                table_parts = ".".join(p.name for p in expression.parts[:-1])
890                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
891                return f"{table_path}.{self.sql(expression, 'this')}"
892
893            return super().column_parts(expression)
894
895        def table_parts(self, expression: exp.Table) -> str:
896            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
897            # we need to make sure the correct quoting is used in each case.
898            #
899            # For example, if there is a CTE x that clashes with a schema name, then the former will
900            # return the table y in that schema, whereas the latter will return the CTE's y column:
901            #
902            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
903            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
904            if expression.meta.get("quoted_table"):
905                table_parts = ".".join(p.name for p in expression.parts)
906                return self.sql(exp.Identifier(this=table_parts, quoted=True))
907
908            return super().table_parts(expression)
909
910        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
911            if isinstance(expression.this, exp.TsOrDsToTimestamp):
912                func_name = "FORMAT_DATETIME"
913            else:
914                func_name = "FORMAT_DATE"
915            this = (
916                expression.this
917                if isinstance(expression.this, (exp.TsOrDsToTimestamp, exp.TsOrDsToDate))
918                else expression
919            )
920            return self.func(func_name, self.format_time(expression), this.this)
921
922        def eq_sql(self, expression: exp.EQ) -> str:
923            # Operands of = cannot be NULL in BigQuery
924            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
925                if not isinstance(expression.parent, exp.Update):
926                    return "NULL"
927
928            return self.binary(expression, "=")
929
930        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
931            parent = expression.parent
932
933            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
934            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
935            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
936                return self.func(
937                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
938                )
939
940            return super().attimezone_sql(expression)
941
942        def trycast_sql(self, expression: exp.TryCast) -> str:
943            return self.cast_sql(expression, safe_prefix="SAFE_")
944
        def bracket_sql(self, expression: exp.Bracket) -> str:
            """Render subscript access, applying BigQuery's OFFSET/ORDINAL/SAFE_ wrappers.

            Struct access via a single string subscript is rewritten to dot notation,
            since BigQuery doesn't support bracket syntax with string keys on structs.
            """
            this = expression.this
            expressions = expression.expressions

            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
                arg = expressions[0]
                # The subscript's type may not be known yet; annotate lazily so the
                # string-key check below can be made.
                if arg.type is None:
                    from sqlglot.optimizer.annotate_types import annotate_types

                    arg = annotate_types(arg)

                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
                    # BQ doesn't support bracket syntax with string values for structs
                    return f"{self.sql(this)}.{arg.name}"

            expressions_sql = self.expressions(expression, flat=True)
            offset = expression.args.get("offset")

            # `offset` encodes the index base: 0 -> OFFSET(..), 1 -> ORDINAL(..)
            if offset == 0:
                expressions_sql = f"OFFSET({expressions_sql})"
            elif offset == 1:
                expressions_sql = f"ORDINAL({expressions_sql})"
            elif offset is not None:
                self.unsupported(f"Unsupported array offset: {offset}")

            # SAFE_ variants return NULL instead of erroring on out-of-bounds access
            if expression.args.get("safe"):
                expressions_sql = f"SAFE_{expressions_sql}"

            return f"{self.sql(this)}[{expressions_sql}]"
974
975        def in_unnest_op(self, expression: exp.Unnest) -> str:
976            return self.sql(expression)
977
978        def version_sql(self, expression: exp.Version) -> str:
979            if expression.name == "TIMESTAMP":
980                expression.set("this", "SYSTEM_TIME")
981            return super().version_sql(expression)
WEEK_OFFSET = -1

First day of the week in DATE_TRUNC(week). Defaults to 0 (Monday). -1 would be Sunday.

UNNEST_COLUMN_ONLY = True

Whether UNNEST table aliases are treated as column aliases.

SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

LOG_BASE_FIRST: Optional[bool] = False

Whether the base comes first in the LOG function. Possible values: True, False, None (two arguments are not supported by LOG)

HEX_LOWERCASE = True

Whether the HEX function returns a lowercase hexadecimal string.

FORCE_EARLY_ALIAS_REF_EXPANSION = True

Whether alias reference expansion (_expand_alias_refs()) should run before column qualification (_qualify_columns()).

For example:

WITH data AS ( SELECT 1 AS id, 2 AS my_id ) SELECT id AS my_id FROM data WHERE my_id = 1 GROUP BY my_id HAVING my_id = 1

In most dialects, "my_id" would refer to "data.my_id" across the query, except: - BigQuery, which will forward the alias to GROUP BY + HAVING clauses i.e it resolves to "WHERE my_id = 1 GROUP BY id HAVING id = 1" - Clickhouse, which will forward the alias across the query i.e it resolves to "WHERE id = 1 GROUP BY id HAVING id = 1"

EXPAND_ALIAS_REFS_EARLY_ONLY_IN_GROUP_BY = True

Whether alias reference expansion before qualification should only happen for the GROUP BY clause.

NORMALIZATION_STRATEGY = <NormalizationStrategy.CASE_INSENSITIVE: 'CASE_INSENSITIVE'>

Specifies the strategy according to which identifiers should be normalized.

NORMALIZE_FUNCTIONS: bool | str = False

Determines how function names are going to be normalized.

Possible values:

"upper" or True: Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.

TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y', '%E6S': '%S.%f', '%e': '%-d'}

Associates this dialect's time formats with their equivalent Python strftime formats.

FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}

Helper which is used for parsing the special syntax CAST(x AS DATE FORMAT 'yyyy'). If empty, the corresponding trie will be constructed off of TIME_MAPPING.

PSEUDOCOLUMNS: Set[str] = {'_PARTITIONDATE', '_PARTITIONTIME'}

Columns that are auto-generated by the engine corresponding to this dialect. For example, such columns may be excluded from SELECT * queries.

SET_OP_DISTINCT_BY_DEFAULT: Dict[Type[sqlglot.expressions.Expression], Optional[bool]] = {<class 'sqlglot.expressions.Except'>: None, <class 'sqlglot.expressions.Intersect'>: None, <class 'sqlglot.expressions.Union'>: None}

Whether a set operation uses DISTINCT by default. This is None when either DISTINCT or ALL must be explicitly specified.

    def normalize_identifier(self, expression: E) -> E:
        """Lowercase case-insensitive identifiers, leaving case-sensitive ones intact."""
        if (
            isinstance(expression, exp.Identifier)
            and self.normalization_strategy is not NormalizationStrategy.CASE_SENSITIVE
        ):
            # Walk out of any dotted path so the check below sees the real parent
            parent = expression.parent
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
            # by default. The following check uses a heuristic to detect tables based on whether
            # they are qualified. This should generally be correct, because tables in BigQuery
            # must be qualified with at least a dataset, unless @@dataset_id is set.
            case_sensitive = (
                isinstance(parent, exp.UserDefinedFunction)
                or (
                    isinstance(parent, exp.Table)
                    and parent.db
                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
                )
                or expression.meta.get("is_table")
            )
            if not case_sensitive:
                expression.set("this", expression.this.lower())

        return expression

Transforms an identifier in a way that resembles how it'd be resolved by this dialect.

For example, an identifier like FoO would be resolved as foo in Postgres, because it lowercases all unquoted identifiers. On the other hand, Snowflake uppercases them, so it would resolve it as FOO. If it was quoted, it'd need to be treated as case-sensitive, and so any normalization would be prohibited in order to avoid "breaking" the identifier.

There are also dialects like Spark, which are case-insensitive even when quotes are present, and dialects like MySQL, whose resolution rules match those employed by the underlying operating system, for example they may always be case-sensitive in Linux.

Finally, the normalization behavior of some engines can even be controlled through flags, like in Redshift's case, where users can explicitly set enable_case_sensitive_identifier.

SQLGlot aims to understand and handle all of these different behaviors gracefully, so that it can analyze queries in the optimizer and successfully capture their semantics.

SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\\n`) to its unescaped version (the corresponding control character, e.g. an actual newline).

tokenizer_class = <class 'BigQuery.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'BigQuery.Parser'>
generator_class = <class 'BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}, 'E': {'6': {'S': {0: True}}}, 'e': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D', '%S.%f': '%E6S', '%-d': '%e'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}, 'S': {'.': {'%': {'f': {0: True}}}}, '-': {'d': {0: True}}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {'%d': 'DD', '%m': 'MM', '%b': 'MON', '%B': 'MONTH', '%Y': 'YYYY', '%y': 'YY', '%I': 'HH12', '%H': 'HH24', '%M': 'MI', '%S': 'SS', '%f': 'SSSSS', '%z': 'TZH'}
INVERSE_FORMAT_TRIE: Dict = {'%': {'d': {0: True}, 'm': {0: True}, 'b': {0: True}, 'B': {0: True}, 'Y': {0: True}, 'y': {0: True}, 'I': {0: True}, 'H': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}, 'z': {0: True}}}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
    class Tokenizer(tokens.Tokenizer):
        # BigQuery accepts single/double quotes and their triple-quoted forms.
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B'...' byte literals, one pair per supported quote style.
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R'...' raw strings, one pair per supported quote style.
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BYTEINT": TokenType.INT,
            "BYTES": TokenType.BINARY,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "DATETIME": TokenType.TIMESTAMP,
            "DECLARE": TokenType.COMMAND,
            "ELSEIF": TokenType.COMMAND,
            "EXCEPTION": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "MODEL": TokenType.MODEL,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
        }
        # These base-tokenizer keywords don't apply to BigQuery: DIV is a function,
        # VALUES is handled specially, and /*+ hint comments aren't supported.
        KEYWORDS.pop("DIV")
        KEYWORDS.pop("VALUES")
        KEYWORDS.pop("/*+")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 
'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 
'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 
'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': 
<TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': 
<TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': 
<TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'ELSEIF': <TokenType.COMMAND: 'COMMAND'>, 'EXCEPTION': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'MODEL': <TokenType.MODEL: 'MODEL'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>}
class BigQuery.Parser(sqlglot.parser.Parser):
348    class Parser(parser.Parser):
# BigQuery-specific parser configuration: feature flags plus lookup tables that
# override the base parser's behavior for this dialect.
349        PREFIXED_PIVOT_COLUMNS = True
350        LOG_DEFAULTS_TO_LN = True
351        SUPPORTS_IMPLICIT_UNNEST = True
352
# Maps BigQuery function names to builders that produce canonical sqlglot AST nodes.
353        FUNCTIONS = {
354            **parser.Parser.FUNCTIONS,
355            "DATE": _build_date,
356            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
357            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
358            "DATE_TRUNC": lambda args: exp.DateTrunc(
359                unit=exp.Literal.string(str(seq_get(args, 1))),
360                this=seq_get(args, 0),
361            ),
362            "DATETIME": _build_datetime,
363            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
364            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
365            "DIV": binary_from_function(exp.IntDiv),
# FORMAT_DATE/PARSE_DATE take (format, value); sqlglot's nodes take (value, format),
# hence the argument swap in these builders.
366            "FORMAT_DATE": lambda args: exp.TimeToStr(
367                this=exp.TsOrDsToDate(this=seq_get(args, 1)), format=seq_get(args, 0)
368            ),
369            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
370            "JSON_EXTRACT_SCALAR": lambda args: exp.JSONExtractScalar(
371                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string("$")
372            ),
373            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
374            "MD5": exp.MD5Digest.from_arg_list,
375            "TO_HEX": _build_to_hex,
376            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
377                [seq_get(args, 1), seq_get(args, 0)]
378            ),
379            "PARSE_TIMESTAMP": _build_parse_timestamp,
380            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
# NOTE(review): re.compile(args[1].name) assumes at least two args and that the
# pattern is a literal valid as a Python regex; a non-literal second argument
# yields name == "" (harmless), but an invalid/BigQuery-only pattern would raise
# re.error here — confirm upstream guards.
381            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
382                this=seq_get(args, 0),
383                expression=seq_get(args, 1),
384                position=seq_get(args, 2),
385                occurrence=seq_get(args, 3),
386                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
387            ),
388            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
389            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
390            "SPLIT": lambda args: exp.Split(
391                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
392                this=seq_get(args, 0),
393                expression=seq_get(args, 1) or exp.Literal.string(","),
394            ),
395            "TIME": _build_time,
396            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
397            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
398            "TIMESTAMP": _build_timestamp,
399            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
400            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
401            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
402                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
403            ),
404            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
405                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
406            ),
407            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
408            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
409            "FORMAT_DATETIME": lambda args: exp.TimeToStr(
410                this=exp.TsOrDsToTimestamp(this=seq_get(args, 1)), format=seq_get(args, 0)
411            ),
412        }
413
# Function-name-keyed parsers that need direct token-stream access (not just args).
414        FUNCTION_PARSERS = {
415            **parser.Parser.FUNCTION_PARSERS,
416            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
417        }
# TRIM is removed so it falls back to plain function-argument parsing.
418        FUNCTION_PARSERS.pop("TRIM")
419
420        NO_PAREN_FUNCTIONS = {
421            **parser.Parser.NO_PAREN_FUNCTIONS,
422            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
423        }
424
425        NESTED_TYPE_TOKENS = {
426            *parser.Parser.NESTED_TYPE_TOKENS,
427            TokenType.TABLE,
428        }
429
430        PROPERTY_PARSERS = {
431            **parser.Parser.PROPERTY_PARSERS,
432            "NOT DETERMINISTIC": lambda self: self.expression(
433                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
434            ),
435            "OPTIONS": lambda self: self._parse_with_property(),
436        }
437
438        CONSTRAINT_PARSERS = {
439            **parser.Parser.CONSTRAINT_PARSERS,
440            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
441        }
442
# BigQuery has no OVERLAPS operator, so drop it from the inherited range parsers.
443        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
444        RANGE_PARSERS.pop(TokenType.OVERLAPS)
445
446        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}
447
448        STATEMENT_PARSERS = {
449            **parser.Parser.STATEMENT_PARSERS,
450            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
451            TokenType.END: lambda self: self._parse_as_command(self._prev),
452            TokenType.FOR: lambda self: self._parse_for_in(),
453        }
454
# Bracket-access wrappers: name -> (index base, is "SAFE_" variant).
# Consumed by _parse_bracket to fold OFFSET/ORDINAL/SAFE_* into the Bracket node.
455        BRACKET_OFFSETS = {
456            "OFFSET": (0, False),
457            "ORDINAL": (1, False),
458            "SAFE_OFFSET": (0, True),
459            "SAFE_ORDINAL": (1, True),
460        }
461
def _parse_for_in(self) -> exp.ForIn:
    """Parse a BigQuery ``FOR ... IN ... DO ...`` statement into an exp.ForIn node."""
    iterator = self._parse_range()
    # "DO" separates the loop header from its body; consume it when present.
    self._match_text_seq("DO")
    body = self._parse_statement()
    return self.expression(exp.ForIn, this=iterator, expression=body)
466
def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
    """Parse one dot-separated component of a table name.

    BigQuery permits unquoted dashes and numeric prefixes inside table names
    (https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names),
    so tokens that the lexer split apart are folded back into one Identifier.
    """
    part = super()._parse_table_part(schema=schema) or self._parse_number()

    if isinstance(part, exp.Identifier):
        name = part.name
        # Greedily absorb dash-joined tokens (e.g. proj-id-123) up to the next dot.
        while self._match(TokenType.DASH, advance=False) and self._next:
            chunk = ""
            while self._curr and self._curr.token_type != TokenType.DOT:
                self._advance()
                chunk += self._prev.text
            name += chunk

        return exp.Identifier(this=name, quoted=part.args.get("quoted"))

    if isinstance(part, exp.Literal):
        name = part.name
        # A number immediately followed by more text (no whitespace) is one name.
        if self._is_connected() and self._parse_var(any_token=True):
            name += self._prev.text

        return exp.Identifier(this=name, quoted=True)

    return part
490
# Parse a full table reference, repairing names that BigQuery's lexer splits
# oddly (floats embedded in names, dots inside a quoted identifier).
491        def _parse_table_parts(
492            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
493        ) -> exp.Table:
# wildcard is forced True: BigQuery allows wildcard tables like `dataset.table*`.
494            table = super()._parse_table_parts(
495                schema=schema, is_db_reference=is_db_reference, wildcard=True
496            )
497
498            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
499            if not table.catalog:
500                if table.db:
501                    parts = table.db.split(".")
502                    if len(parts) == 2 and not table.args["db"].quoted:
503                        table.set("catalog", exp.Identifier(this=parts[0]))
504                        table.set("db", exp.Identifier(this=parts[1]))
505                else:
506                    parts = table.name.split(".")
507                    if len(parts) == 2 and not table.this.quoted:
508                        table.set("db", exp.Identifier(this=parts[0]))
509                        table.set("this", exp.Identifier(this=parts[1]))
510
# If any part still embeds a dot, the whole name was quoted as one unit
# (e.g. `project.dataset.table`): re-split it into catalog/db/this.
511            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
512                catalog, db, this, *rest = (
513                    exp.to_identifier(p, quoted=True)
514                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
515                )
516
# Parts beyond the first three become a nested Dot access under `this`.
517                if rest and this:
518                    this = exp.Dot.build([this, *rest])  # type: ignore
519
520                table = exp.Table(
521                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
522                )
# Marker flag — presumably read by the generator to re-quote the whole
# name as a single identifier; verify against the generator side.
523                table.meta["quoted_table"] = True
524
525            return table
526
def _parse_column(self) -> t.Optional[exp.Expression]:
    """Parse a column, normalizing quoted names that contain embedded dots."""
    column = super()._parse_column()

    if not isinstance(column, exp.Column):
        return column

    parts = column.parts
    if not any("." in part.name for part in parts):
        return column

    # Rebuild the dotted path and redistribute it across catalog/db/table/this.
    dotted_name = ".".join(part.name for part in parts)
    catalog, db, table, this, *rest = (
        exp.to_identifier(word, quoted=True)
        for word in split_num_words(dotted_name, ".", 4)
    )

    if rest and this:
        # Anything beyond four parts becomes a nested Dot access on `this`.
        this = exp.Dot.build([this, *rest])  # type: ignore

    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
    # Mark the column as originating from a quoted, dotted name.
    column.meta["quoted_column"] = True
    return column
544
@t.overload
def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...

@t.overload
def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...

def _parse_json_object(self, agg=False):
    """Parse JSON_OBJECT, normalizing BigQuery's array-pair signature.

    BigQuery's "signature 2" (JSON_OBJECT(['a','b'], [1, 2])) is converted into
    sqlglot's canonical list of JSONKeyValue pairs.
    https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
    """
    json_object = super()._parse_json_object()
    first_pair = seq_get(json_object.expressions, 0)

    if (
        first_pair
        and isinstance(first_pair.this, exp.Array)
        and isinstance(first_pair.expression, exp.Array)
    ):
        # Zip the keys array with the values array into canonical pairs.
        pairs = [
            exp.JSONKeyValue(this=key, expression=value)
            for key, value in zip(
                first_pair.this.expressions, first_pair.expression.expressions
            )
        ]
        json_object.set("expressions", pairs)

    return json_object
571
def _parse_bracket(
    self, this: t.Optional[exp.Expression] = None
) -> t.Optional[exp.Expression]:
    """Parse a bracket access, folding OFFSET/ORDINAL/SAFE_* wrappers.

    BigQuery spells array indexing as arr[OFFSET(i)], arr[ORDINAL(i)], etc.;
    the wrapper's semantics move into the Bracket node's `offset`/`safe` args.
    """
    bracket = super()._parse_bracket(this)

    # Nothing new was parsed; hand back the input unchanged.
    if this is bracket:
        return bracket

    if isinstance(bracket, exp.Bracket):
        for expr in bracket.expressions:
            settings = self.BRACKET_OFFSETS.get(expr.name.upper())
            if settings is None:
                break

            index_base, is_safe = settings
            bracket.set("offset", index_base)
            bracket.set("safe", is_safe)
            # Replace e.g. OFFSET(i) with its inner index expression i.
            expr.replace(expr.expressions[0])

    return bracket
593
def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
    """Parse UNNEST, flagging arrays of structs for struct-field explosion.

    Unnesting an array of structs in BigQuery explodes the top-level struct
    fields, in contrast to dialects such as DuckDB that by default flatten
    only the array itself.
    """
    unnest = super()._parse_unnest(with_alias=with_alias)
    if not unnest:
        return None

    first_expr = seq_get(unnest.expressions, 0)
    if not first_expr:
        return unnest

    # Imported here rather than at module top — presumably to avoid an
    # import cycle with the optimizer package.
    from sqlglot.optimizer.annotate_types import annotate_types

    typed_expr = annotate_types(first_expr)
    if typed_expr.is_type(exp.DataType.Type.ARRAY):
        element_types = typed_expr._type.expressions
        if any(elem.is_type(exp.DataType.Type.STRUCT) for elem in element_types):
            unnest.set("explode_array", True)

    return unnest

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_DEFAULTS_TO_LN = True
SUPPORTS_IMPLICIT_UNNEST = True
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_HAS': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 
'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_date>, 'DATE_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 
'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME': <function _build_datetime>, 'DATETIME_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Flatten'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 
'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function BigQuery.Parser.<lambda>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function BigQuery.Parser.<lambda>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 
'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <function _build_time>, 'TIME_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <function _build_timestamp>, 'TIMESTAMP_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'TIMESTAMPDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMPFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMP_SUB': 
<function build_date_delta_with_interval.<locals>._builder>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToNumber'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function _build_to_hex>, 
'DIV': <function binary_from_function.<locals>.<lambda>>, 'FORMAT_DATE': <function BigQuery.Parser.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _build_parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TIMESTAMP_MICROS': <function BigQuery.Parser.<lambda>>, 'TIMESTAMP_MILLIS': <function BigQuery.Parser.<lambda>>, 'TIMESTAMP_SECONDS': <function BigQuery.Parser.<lambda>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'FORMAT_DATETIME': <function BigQuery.Parser.<lambda>>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MAP: 'MAP'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.LIST: 'LIST'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.NESTED: 'NESTED'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 
'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>}
NULL_TOKENS = {<TokenType.NULL: 'NULL'>, <TokenType.UNKNOWN: 'UNKNOWN'>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.ELSE: 'ELSE'>: <function BigQuery.Parser.<lambda>>, <TokenType.END: 'END'>: <function BigQuery.Parser.<lambda>>, <TokenType.FOR: 'FOR'>: <function BigQuery.Parser.<lambda>>}
BRACKET_OFFSETS = {'OFFSET': (0, False), 'ORDINAL': (1, False), 'SAFE_OFFSET': (0, True), 'SAFE_ORDINAL': (1, True)}
ID_VAR_TOKENS = {<TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DIV: 'DIV'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.INT256: 'INT256'>, <TokenType.RANGE: 'RANGE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.DATE: 'DATE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.INT: 'INT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MAP: 'MAP'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.UUID: 'UUID'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.LEFT: 'LEFT'>, <TokenType.NEXT: 'NEXT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.INET: 'INET'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.DELETE: 'DELETE'>, <TokenType.UINT: 'UINT'>, <TokenType.JSON: 'JSON'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.ASOF: 'ASOF'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.NAME: 'NAME'>, <TokenType.BIT: 'BIT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.LIST: 'LIST'>, <TokenType.TIMESTAMP: 
'TIMESTAMP'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.UINT128: 'UINT128'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SET: 'SET'>, <TokenType.ANY: 'ANY'>, <TokenType.FULL: 'FULL'>, <TokenType.ANTI: 'ANTI'>, <TokenType.ASC: 'ASC'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.CHAR: 'CHAR'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.USE: 'USE'>, <TokenType.DESC: 'DESC'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.CASE: 'CASE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.XML: 'XML'>, <TokenType.IPV4: 'IPV4'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.ENUM: 'ENUM'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.SOME: 'SOME'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.INT128: 'INT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.TIME: 'TIME'>, <TokenType.IS: 'IS'>, <TokenType.DATE32: 'DATE32'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.CACHE: 'CACHE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.CUBE: 'CUBE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DECIMAL: 'DECIMAL'>, 
<TokenType.ALL: 'ALL'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.COPY: 'COPY'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TAG: 'TAG'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.APPLY: 'APPLY'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.SUPER: 'SUPER'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TOP: 'TOP'>, <TokenType.NULL: 'NULL'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.END: 'END'>, <TokenType.MERGE: 'MERGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.IPV6: 
'IPV6'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VAR: 'VAR'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TEXT: 'TEXT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.MODEL: 'MODEL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.IMAGE: 'IMAGE'>}
TABLE_ALIAS_TOKENS = {<TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DIV: 'DIV'>, <TokenType.INT256: 'INT256'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.DATE: 'DATE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.INT: 'INT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MAP: 'MAP'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.UUID: 'UUID'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.NEXT: 'NEXT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.INET: 'INET'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.DELETE: 'DELETE'>, <TokenType.UINT: 'UINT'>, <TokenType.JSON: 'JSON'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.NAME: 'NAME'>, <TokenType.BIT: 'BIT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.LIST: 'LIST'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.OPERATOR: 'OPERATOR'>, 
<TokenType.SERIAL: 'SERIAL'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.UINT128: 'UINT128'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SET: 'SET'>, <TokenType.ANY: 'ANY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.ASC: 'ASC'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.CHAR: 'CHAR'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.USE: 'USE'>, <TokenType.DESC: 'DESC'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.CASE: 'CASE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.XML: 'XML'>, <TokenType.IPV4: 'IPV4'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.ENUM: 'ENUM'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.SOME: 'SOME'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.INT128: 'INT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.TIME: 'TIME'>, <TokenType.IS: 'IS'>, <TokenType.DATE32: 'DATE32'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.CACHE: 'CACHE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.CUBE: 'CUBE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.ALL: 'ALL'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.COMMENT: 
'COMMENT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.COPY: 'COPY'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.TAG: 'TAG'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.SUPER: 'SUPER'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TOP: 'TOP'>, <TokenType.NULL: 'NULL'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.END: 'END'>, <TokenType.MERGE: 'MERGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VAR: 'VAR'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.USERDEFINED: 
'USERDEFINED'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TEXT: 'TEXT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.MODEL: 'MODEL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.IMAGE: 'IMAGE'>}
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
STRUCT_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
TIMESTAMPS
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
LAMBDAS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
SHOW_PARSERS
TYPE_LITERAL_PARSERS
TYPE_CONVERTERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
STRICT_CAST
IDENTIFY_PIVOT_STRINGS
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
DEFAULT_SAMPLING_METHOD
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
COLON_IS_VARIANT_EXTRACT
VALUES_FOLLOWED_BY_PAREN
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class BigQuery.Generator(sqlglot.generator.Generator):
616    class Generator(generator.Generator):
           # SQL generator for BigQuery's GoogleSQL dialect. The boolean/string
           # flags below configure base-generator behavior; the dicts that follow
           # map sqlglot expression and type objects to their BigQuery renderings.
617        INTERVAL_ALLOWS_PLURAL_FORM = False
618        JOIN_HINTS = False
619        QUERY_HINTS = False
620        TABLE_HINTS = False
621        LIMIT_FETCH = "LIMIT"
622        RENAME_TABLE_WITH_DB = False
623        NVL2_SUPPORTED = False
624        UNNEST_WITH_ORDINALITY = False
625        COLLATE_IS_FUNC = True
626        LIMIT_ONLY_LITERALS = True
627        SUPPORTS_TABLE_ALIAS_COLUMNS = False
628        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
629        JSON_KEY_VALUE_PAIR_SEP = ","
630        NULL_ORDERING_SUPPORTED = False
631        IGNORE_NULLS_IN_FUNC = True
632        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
633        CAN_IMPLEMENT_ARRAY_ANY = True
634        SUPPORTS_TO_NUMBER = False
635        NAMED_PLACEHOLDER_TOKEN = "@"
636        HEX_FUNC = "TO_HEX"
637        WITH_PROPERTIES_PREFIX = "OPTIONS"
638        SUPPORTS_EXPLODING_PROJECTIONS = False
639        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
640
           # Per-expression SQL renderers, extending the base generator's table.
           # Values are either module-level helpers or inline lambdas that take
           # (self, expression) and return a SQL string.
641        TRANSFORMS = {
642            **generator.Generator.TRANSFORMS,
643            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
644            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
645            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
646            exp.Array: inline_array_unless_query,
647            exp.ArrayContains: _array_contains_sql,
648            exp.ArrayFilter: filter_array_using_unnest,
649            exp.ArraySize: rename_func("ARRAY_LENGTH"),
650            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
651            exp.CollateProperty: lambda self, e: (
652                f"DEFAULT COLLATE {self.sql(e, 'this')}"
653                if e.args.get("default")
654                else f"COLLATE {self.sql(e, 'this')}"
655            ),
656            exp.Commit: lambda *_: "COMMIT TRANSACTION",
657            exp.CountIf: rename_func("COUNTIF"),
658            exp.Create: _create_sql,
659            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
660            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
661            exp.DateDiff: lambda self, e: self.func(
662                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
663            ),
664            exp.DateFromParts: rename_func("DATE"),
665            exp.DateStrToDate: datestrtodate_sql,
666            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
667            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
668            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
669            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
670            exp.FromTimeZone: lambda self, e: self.func(
671                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
672            ),
673            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
674            exp.GroupConcat: rename_func("STRING_AGG"),
675            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
676            exp.If: if_sql(false_value="NULL"),
677            exp.ILike: no_ilike_sql,
678            exp.IntDiv: rename_func("DIV"),
679            exp.JSONFormat: rename_func("TO_JSON_STRING"),
680            exp.Max: max_or_greatest,
681            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
682            exp.MD5Digest: rename_func("MD5"),
683            exp.Min: min_or_least,
684            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
685            exp.RegexpExtract: lambda self, e: self.func(
686                "REGEXP_EXTRACT",
687                e.this,
688                e.expression,
689                e.args.get("position"),
690                e.args.get("occurrence"),
691            ),
692            exp.RegexpReplace: regexp_replace_sql,
693            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
694            exp.ReturnsProperty: _returnsproperty_sql,
695            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
696            exp.Select: transforms.preprocess(
697                [
698                    transforms.explode_to_unnest(),
699                    transforms.unqualify_unnest,
700                    transforms.eliminate_distinct_on,
701                    _alias_ordered_group,
702                    transforms.eliminate_semi_and_anti_joins,
703                ]
704            ),
705            exp.SHA: rename_func("SHA1"),
706            exp.SHA2: sha256_sql,
707            exp.StabilityProperty: lambda self, e: (
708                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
709            ),
710            exp.StrToDate: _str_to_datetime_sql,
711            exp.StrToTime: _str_to_datetime_sql,
712            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
713            exp.TimeFromParts: rename_func("TIME"),
714            exp.TimestampFromParts: rename_func("DATETIME"),
715            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
716            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
717            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
718            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
719            exp.TimeStrToTime: timestrtotime_sql,
720            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
721            exp.TsOrDsAdd: _ts_or_ds_add_sql,
722            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
723            exp.TsOrDsToTime: rename_func("TIME"),
724            exp.TsOrDsToTimestamp: rename_func("DATETIME"),
725            exp.Unhex: rename_func("FROM_HEX"),
726            exp.UnixDate: rename_func("UNIX_DATE"),
727            exp.UnixToTime: _unix_to_time_sql,
728            exp.Values: _derived_table_values_to_unnest,
729            exp.VariancePop: rename_func("VAR_POP"),
730        }
731
           # JSON path components rendered for BigQuery; note this replaces (does
           # not extend) the base generator's set.
732        SUPPORTED_JSON_PATH_PARTS = {
733            exp.JSONPathKey,
734            exp.JSONPathRoot,
735            exp.JSONPathSubscript,
736        }
737
           # sqlglot type -> BigQuery type-name overrides (e.g. BIGINT -> INT64,
           # TEXT -> STRING, TIMESTAMPTZ -> TIMESTAMP).
738        TYPE_MAPPING = {
739            **generator.Generator.TYPE_MAPPING,
740            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
741            exp.DataType.Type.BIGINT: "INT64",
742            exp.DataType.Type.BINARY: "BYTES",
743            exp.DataType.Type.BOOLEAN: "BOOL",
744            exp.DataType.Type.CHAR: "STRING",
745            exp.DataType.Type.DECIMAL: "NUMERIC",
746            exp.DataType.Type.DOUBLE: "FLOAT64",
747            exp.DataType.Type.FLOAT: "FLOAT64",
748            exp.DataType.Type.INT: "INT64",
749            exp.DataType.Type.NCHAR: "STRING",
750            exp.DataType.Type.NVARCHAR: "STRING",
751            exp.DataType.Type.SMALLINT: "INT64",
752            exp.DataType.Type.TEXT: "STRING",
753            exp.DataType.Type.TIMESTAMP: "DATETIME",
754            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
755            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
756            exp.DataType.Type.TINYINT: "INT64",
757            exp.DataType.Type.VARBINARY: "BYTES",
758            exp.DataType.Type.ROWVERSION: "BYTES",
759            exp.DataType.Type.VARCHAR: "STRING",
760            exp.DataType.Type.VARIANT: "ANY TYPE",
761        }
762
           # Placement overrides for CREATE-statement properties; VolatileProperty
           # is marked unsupported for this dialect.
763        PROPERTIES_LOCATION = {
764            **generator.Generator.PROPERTIES_LOCATION,
765            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
766            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
767        }
768
769        # WINDOW comes after QUALIFY
770        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
771        AFTER_HAVING_MODIFIER_TRANSFORMS = {
772            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
773            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
774        }
775
776        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
777        RESERVED_KEYWORDS = {
778            "all",
779            "and",
780            "any",
781            "array",
782            "as",
783            "asc",
784            "assert_rows_modified",
785            "at",
786            "between",
787            "by",
788            "case",
789            "cast",
790            "collate",
791            "contains",
792            "create",
793            "cross",
794            "cube",
795            "current",
796            "default",
797            "define",
798            "desc",
799            "distinct",
800            "else",
801            "end",
802            "enum",
803            "escape",
804            "except",
805            "exclude",
806            "exists",
807            "extract",
808            "false",
809            "fetch",
810            "following",
811            "for",
812            "from",
813            "full",
814            "group",
815            "grouping",
816            "groups",
817            "hash",
818            "having",
819            "if",
820            "ignore",
821            "in",
822            "inner",
823            "intersect",
824            "interval",
825            "into",
826            "is",
827            "join",
828            "lateral",
829            "left",
830            "like",
831            "limit",
832            "lookup",
833            "merge",
834            "natural",
835            "new",
836            "no",
837            "not",
838            "null",
839            "nulls",
840            "of",
841            "on",
842            "or",
843            "order",
844            "outer",
845            "over",
846            "partition",
847            "preceding",
848            "proto",
849            "qualify",
850            "range",
851            "recursive",
852            "respect",
853            "right",
854            "rollup",
855            "rows",
856            "select",
857            "set",
858            "some",
859            "struct",
860            "tablesample",
861            "then",
862            "to",
863            "treat",
864            "true",
865            "unbounded",
866            "union",
867            "unnest",
868            "using",
869            "when",
870            "where",
871            "window",
872            "with",
873            "within",
874        }
875
def mod_sql(self, expression: exp.Mod) -> str:
    """Render a modulo operation as BigQuery's MOD(x, y) function call."""

    def strip_parens(node: exp.Expression) -> exp.Expression:
        # MOD(...) supplies its own grouping, so a wrapping Paren node
        # would only add noise like MOD((a), (b)).
        return node.unnest() if isinstance(node, exp.Paren) else node

    return self.func(
        "MOD", strip_parens(expression.this), strip_parens(expression.expression)
    )
884
def column_parts(self, expression: exp.Column) -> str:
    """Render a column's qualified path.

    For references of the shape `dataset.table`.name the quoted table path
    must be preserved as a single backquoted identifier, otherwise the
    reference breaks.
    """
    if not expression.meta.get("quoted_column"):
        return super().column_parts(expression)

    dotted_table = ".".join(part.name for part in expression.parts[:-1])
    quoted_table = self.sql(exp.Identifier(this=dotted_table, quoted=True))
    return f"{quoted_table}.{self.sql(expression, 'this')}"
894
def table_parts(self, expression: exp.Table) -> str:
    """Render a table path, keeping single-identifier quoting when flagged.

    In BigQuery, `x.y` and `x`.`y` may resolve to different data sources.
    For instance, with a CTE x that clashes with a schema name:

    - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
    - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest

    so the original quoting style must be reproduced faithfully.
    """
    if not expression.meta.get("quoted_table"):
        return super().table_parts(expression)

    dotted = ".".join(part.name for part in expression.parts)
    return self.sql(exp.Identifier(this=dotted, quoted=True))
909
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    """Render time formatting via FORMAT_DATETIME/FORMAT_DATE.

    FORMAT_DATETIME is used when the operand is a TsOrDsToTimestamp,
    FORMAT_DATE otherwise; TsOrDs casts are unwrapped so only the
    underlying value is passed to the formatting function.
    """
    source = expression.this
    func_name = (
        "FORMAT_DATETIME"
        if isinstance(source, exp.TsOrDsToTimestamp)
        else "FORMAT_DATE"
    )
    if not isinstance(source, (exp.TsOrDsToTimestamp, exp.TsOrDsToDate)):
        source = expression
    return self.func(func_name, self.format_time(expression), source.this)
921
def eq_sql(self, expression: exp.EQ) -> str:
    """Render equality; operands of = cannot be NULL in BigQuery, so such
    comparisons fold to NULL (except inside an UPDATE, where `col = NULL`
    is an assignment)."""
    null_operand = isinstance(expression.left, exp.Null) or isinstance(
        expression.right, exp.Null
    )
    if null_operand and not isinstance(expression.parent, exp.Update):
        return "NULL"

    return self.binary(expression, "=")
929
def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    """Render AT TIME ZONE.

    BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME
    ZONE <tz>]]); only the TIMESTAMP form should be rewritten with the
    DATETIME/TIMESTAMP conversion below, so a cast-to-text parent keeps
    the base generator's rendering.
    """
    parent = expression.parent
    if isinstance(parent, exp.Cast) and parent.to.is_type("text"):
        return super().attimezone_sql(expression)

    localized = self.func("DATETIME", expression.this, expression.args.get("zone"))
    return self.func("TIMESTAMP", localized)
941
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST using BigQuery's SAFE_ prefix (i.e. SAFE_CAST)."""
    return self.cast_sql(expression, safe_prefix="SAFE_")
944
def bracket_sql(self, expression: exp.Bracket) -> str:
    """Render subscript access, mapping offsets to OFFSET()/ORDINAL() and
    rewriting string-keyed struct access to dot notation."""
    this = expression.this
    args = expression.expressions

    if this and len(args) == 1 and this.is_type(exp.DataType.Type.STRUCT):
        key = args[0]
        if key.type is None:
            from sqlglot.optimizer.annotate_types import annotate_types

            key = annotate_types(key)

        if key.type and key.type.this in exp.DataType.TEXT_TYPES:
            # BQ doesn't support bracket syntax with string values for structs
            return f"{self.sql(this)}.{key.name}"

    inner = self.expressions(expression, flat=True)
    offset = expression.args.get("offset")

    # 0-based vs 1-based indexing is spelled OFFSET vs ORDINAL in BigQuery.
    if offset == 0:
        inner = f"OFFSET({inner})"
    elif offset == 1:
        inner = f"ORDINAL({inner})"
    elif offset is not None:
        self.unsupported(f"Unsupported array offset: {offset}")

    if expression.args.get("safe"):
        inner = f"SAFE_{inner}"

    return f"{self.sql(this)}[{inner}]"
974
def in_unnest_op(self, expression: exp.Unnest) -> str:
    """Render an UNNEST appearing as an IN operand as-is (no extra wrapping)."""
    return self.sql(expression)
977
def version_sql(self, expression: exp.Version) -> str:
    """Render time-travel clauses, rewriting FOR TIMESTAMP AS OF into
    BigQuery's FOR SYSTEM_TIME AS OF spelling before delegating."""
    if expression.name == "TIMESTAMP":
        expression.set("this", "SYSTEM_TIME")

    return super().version_sql(expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
NVL2_SUPPORTED = False
UNNEST_WITH_ORDINALITY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
SUPPORTS_TABLE_ALIAS_COLUMNS = False
UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
JSON_KEY_VALUE_PAIR_SEP = ','
NULL_ORDERING_SUPPORTED = False
IGNORE_NULLS_IN_FUNC = True
JSON_PATH_SINGLE_QUOTE_ESCAPE = True
CAN_IMPLEMENT_ARRAY_ANY = True
SUPPORTS_TO_NUMBER = False
NAMED_PLACEHOLDER_TOKEN = '@'
HEX_FUNC = 'TO_HEX'
WITH_PROPERTIES_PREFIX = 'OPTIONS'
SUPPORTS_EXPLODING_PROJECTIONS = False
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: 
<function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 
'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.ArgMin'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.Array'>: <function inline_array_unless_query>, <class 'sqlglot.expressions.ArrayContains'>: <function _array_contains_sql>, <class 'sqlglot.expressions.ArrayFilter'>: <function filter_array_using_unnest>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.CollateProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Commit'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.CountIf'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 
'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Rollback'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.SHA2'>: <function sha256_sql>, <class 'sqlglot.expressions.StrToDate'>: <function _str_to_datetime_sql>, <class 'sqlglot.expressions.StrToTime'>: <function _str_to_datetime_sql>, <class 'sqlglot.expressions.TimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampFromParts'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampDiff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Transaction'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _ts_or_ds_add_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function _ts_or_ds_diff_sql>, <class 'sqlglot.expressions.TsOrDsToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsToTimestamp'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixDate'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function _unix_to_time_sql>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'BYTES', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: 
<Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
AFTER_HAVING_MODIFIER_TRANSFORMS = {'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
RESERVED_KEYWORDS = {'limit', 'order', 'null', 'window', 'not', 'set', 'at', 'lateral', 'using', 'partition', 'full', 'natural', 'join', 'tablesample', 'into', 'when', 'contains', 'desc', 'following', 'current', 'respect', 'rollup', 'rows', 'new', 'groups', 'cast', 'unnest', 'asc', 'then', 'hash', 'by', 'of', 'treat', 'right', 'where', 'extract', 'default', 'over', 'cube', 'nulls', 'qualify', 'false', 'proto', 'range', 'distinct', 'some', 'having', 'interval', 'in', 'from', 'and', 'for', 'cross', 'inner', 'or', 'enum', 'unbounded', 'to', 'struct', 'create', 'exists', 'recursive', 'grouping', 'true', 'end', 'except', 'lookup', 'left', 'as', 'exclude', 'like', 'is', 'outer', 'define', 'select', 'else', 'collate', 'within', 'assert_rows_modified', 'on', 'with', 'escape', 'any', 'all', 'group', 'no', 'if', 'intersect', 'array', 'fetch', 'case', 'merge', 'between', 'union', 'preceding', 'ignore'}
def mod_sql(self, expression: exp.Mod) -> str:
    """Render the modulo operator as BigQuery's MOD() function.

    Parenthesized operands are unwrapped first, since MOD(x, y) already
    delimits its arguments and extra parens would be redundant.
    """
    lhs = expression.this
    rhs = expression.expression
    if isinstance(lhs, exp.Paren):
        lhs = lhs.unnest()
    if isinstance(rhs, exp.Paren):
        rhs = rhs.unnest()
    return self.func("MOD", lhs, rhs)
def column_parts(self, expression: exp.Column) -> str:
    """Generate the qualifier + name parts of a column reference.

    If a column reference is of the form `dataset.table`.name, the quoted
    table path must be preserved as a single identifier, otherwise the
    reference breaks.
    """
    if not expression.meta.get("quoted_column"):
        return super().column_parts(expression)

    quoted_path = exp.Identifier(
        this=".".join(part.name for part in expression.parts[:-1]),
        quoted=True,
    )
    return f"{self.sql(quoted_path)}.{self.sql(expression, 'this')}"
def table_parts(self, expression: exp.Table) -> str:
    """Generate the dotted parts of a table reference.

    Depending on the context, `x.y` may not resolve to the same data source
    as `x`.`y`, so the correct quoting must be used in each case.

    For example, if there is a CTE x that clashes with a schema name, then
    the former will return the table y in that schema, whereas the latter
    will return the CTE's y column:

    - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
    - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
    """
    if not expression.meta.get("quoted_table"):
        return super().table_parts(expression)

    joined_path = ".".join(part.name for part in expression.parts)
    return self.sql(exp.Identifier(this=joined_path, quoted=True))
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    """Render TimeToStr as FORMAT_DATETIME (timestamp input) or FORMAT_DATE."""
    inner = expression.this
    func_name = (
        "FORMAT_DATETIME" if isinstance(inner, exp.TsOrDsToTimestamp) else "FORMAT_DATE"
    )
    # If the operand is already a ts/ds conversion, format its argument
    # directly; otherwise fall back to the TimeToStr node itself.
    if isinstance(inner, (exp.TsOrDsToTimestamp, exp.TsOrDsToDate)):
        target = inner
    else:
        target = expression
    return self.func(func_name, self.format_time(expression), target.this)
def eq_sql(self, expression: exp.EQ) -> str:
    """Render equality; operands of = cannot be NULL in BigQuery.

    A comparison against a NULL literal is folded to the literal NULL
    (the comparison's value), except inside an UPDATE SET clause where
    `col = NULL` is a legitimate assignment.
    """
    left_is_null = isinstance(expression.left, exp.Null)
    right_is_null = isinstance(expression.right, exp.Null)
    if (left_is_null or right_is_null) and not isinstance(expression.parent, exp.Update):
        return "NULL"

    return self.binary(expression, "=")
def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    """Render AT TIME ZONE via TIMESTAMP(DATETIME(x, zone)).

    BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
    Only the TIMESTAMP one should use the conversion below, so when the parent
    is a CAST to a text type we defer to the base generator instead.
    """
    parent = expression.parent
    if isinstance(parent, exp.Cast) and parent.to.is_type("text"):
        return super().attimezone_sql(expression)

    localized = self.func("DATETIME", expression.this, expression.args.get("zone"))
    return self.func("TIMESTAMP", localized)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST as BigQuery's SAFE_CAST."""
    return self.cast_sql(expression, safe_prefix="SAFE_")
def bracket_sql(self, expression: exp.Bracket) -> str:
    """Render bracket access, handling struct fields and array offsets.

    Struct access by string key becomes dot access (BQ doesn't support
    bracket syntax with string values for structs). Array subscripts are
    wrapped in OFFSET()/ORDINAL() per the parsed offset base, and prefixed
    with SAFE_ when the access is marked safe.
    """
    this = expression.this
    subscripts = expression.expressions

    if len(subscripts) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
        key = subscripts[0]
        if key.type is None:
            from sqlglot.optimizer.annotate_types import annotate_types

            key = annotate_types(key)

        if key.type and key.type.this in exp.DataType.TEXT_TYPES:
            # BQ doesn't support bracket syntax with string values for structs
            return f"{self.sql(this)}.{key.name}"

    subscript_sql = self.expressions(expression, flat=True)
    offset = expression.args.get("offset")

    wrapper = {0: "OFFSET", 1: "ORDINAL"}.get(offset)
    if wrapper:
        subscript_sql = f"{wrapper}({subscript_sql})"
    elif offset is not None:
        self.unsupported(f"Unsupported array offset: {offset}")

    if expression.args.get("safe"):
        subscript_sql = f"SAFE_{subscript_sql}"

    return f"{self.sql(this)}[{subscript_sql}]"
def in_unnest_op(self, expression: exp.Unnest) -> str:
    """Render an UNNEST on the right-hand side of IN without extra wrapping."""
    return self.sql(expression)
def version_sql(self, expression: exp.Version) -> str:
    """Render time-travel clauses, mapping FOR TIMESTAMP to FOR SYSTEM_TIME."""
    if expression.name == "TIMESTAMP":
        expression.set("this", "SYSTEM_TIME")

    return super().version_sql(expression)
# Generator feature flags overriding the base Generator defaults.
TRY_SUPPORTED = False  # no TRY expression; TRY_CAST is rendered as SAFE_CAST (see trycast_sql)
SUPPORTS_UESCAPE = False  # NOTE(review): presumably no UESCAPE clause on string literals — confirm
SUPPORTS_NULLABLE_TYPES = False  # NOTE(review): presumably no explicit NULLABLE type modifier — confirm
Inherited Members
sqlglot.generator.Generator
Generator
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
AGGREGATE_FILTER_SUPPORTED
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
SUPPORTS_TABLE_COPY
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
INSERT_OVERWRITE
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
SET_OP_MODIFIERS
COPY_PARAMS_ARE_WRAPPED
COPY_PARAMS_EQ_REQUIRED
COPY_HAS_INTO_KEYWORD
STAR_EXCEPT
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
ARRAY_CONCAT_IS_VAR_LEN
SUPPORTS_CONVERT_TIMEZONE
PARSE_JSON_NAME
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
describe_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
with_properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
unnest_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
renametable_sql
renamecolumn_sql
alterset_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
log_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
tonumber_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
struct_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonexists_sql
arrayagg_sql