
sqlglot.dialects.hive
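The listing below is the module source. As a quick orientation (this example is not part of the module and is only a minimal sketch assuming a recent sqlglot version), the dialect is normally used through the top-level sqlglot API by passing "hive" as the read and/or write dialect:

import sqlglot

# Parse a Hive query with this dialect and render it back as Hive SQL.
# COLLECT_LIST is mapped to exp.ArrayAgg by Hive.Parser and re-emitted by
# Hive.Generator.arrayagg_sql below.
expression = sqlglot.parse_one("SELECT COLLECT_LIST(x) FROM t", read="hive")
print(expression.sql(dialect="hive"))

# Transpile to another dialect; the exact output depends on the target dialect
# and the sqlglot version.
print(sqlglot.transpile("SELECT COLLECT_LIST(x) FROM t", read="hive", write="spark")[0])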

from __future__ import annotations

import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
    DATE_ADD_OR_SUB,
    Dialect,
    NormalizationStrategy,
    approx_count_distinct_sql,
    arg_max_or_min_no_count,
    datestrtodate_sql,
    build_formatted_time,
    if_sql,
    is_parse_json,
    left_to_substring_sql,
    locate_to_strposition,
    max_or_greatest,
    min_or_least,
    no_ilike_sql,
    no_recursive_cte_sql,
    no_safe_divide_sql,
    no_trycast_sql,
    regexp_extract_sql,
    regexp_replace_sql,
    rename_func,
    right_to_substring_sql,
    strposition_to_locate_sql,
    struct_extract_sql,
    time_format,
    timestrtotime_sql,
    var_map_sql,
)
from sqlglot.transforms import (
    remove_unique_constraints,
    ctas_with_tmp_tables_to_create_tmp_view,
    preprocess,
    move_schema_columns_to_partitioned_by,
)
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType

# (FuncType, Multiplier)
DATE_DELTA_INTERVAL = {
    "YEAR": ("ADD_MONTHS", 12),
    "MONTH": ("ADD_MONTHS", 1),
    "QUARTER": ("ADD_MONTHS", 3),
    "WEEK": ("DATE_ADD", 7),
    "DAY": ("DATE_ADD", 1),
}

TIME_DIFF_FACTOR = {
    "MILLISECOND": " * 1000",
    "SECOND": "",
    "MINUTE": " / 60",
    "HOUR": " / 3600",
}

DIFF_MONTH_SWITCH = ("YEAR", "QUARTER", "MONTH")


def _add_date_sql(self: Hive.Generator, expression: DATE_ADD_OR_SUB) -> str:
    if isinstance(expression, exp.TsOrDsAdd) and not expression.unit:
        return self.func("DATE_ADD", expression.this, expression.expression)

    unit = expression.text("unit").upper()
    func, multiplier = DATE_DELTA_INTERVAL.get(unit, ("DATE_ADD", 1))

    if isinstance(expression, exp.DateSub):
        multiplier *= -1

    if expression.expression.is_number:
        modified_increment = exp.Literal.number(int(expression.text("expression")) * multiplier)
    else:
        modified_increment = expression.expression
        if multiplier != 1:
            modified_increment = exp.Mul(  # type: ignore
                this=modified_increment, expression=exp.Literal.number(multiplier)
            )

    return self.func(func, expression.this, modified_increment)


def _date_diff_sql(self: Hive.Generator, expression: exp.DateDiff | exp.TsOrDsDiff) -> str:
    unit = expression.text("unit").upper()

    factor = TIME_DIFF_FACTOR.get(unit)
    if factor is not None:
        left = self.sql(expression, "this")
        right = self.sql(expression, "expression")
        sec_diff = f"UNIX_TIMESTAMP({left}) - UNIX_TIMESTAMP({right})"
        return f"({sec_diff}){factor}" if factor else sec_diff

    months_between = unit in DIFF_MONTH_SWITCH
    sql_func = "MONTHS_BETWEEN" if months_between else "DATEDIFF"
    _, multiplier = DATE_DELTA_INTERVAL.get(unit, ("", 1))
    multiplier_sql = f" / {multiplier}" if multiplier > 1 else ""
    diff_sql = f"{sql_func}({self.format_args(expression.this, expression.expression)})"

    if months_between or multiplier_sql:
        # MONTHS_BETWEEN returns a float, so we need to truncate the fractional part.
        # For the same reason, we want to truncate if there's a divisor present.
        diff_sql = f"CAST({diff_sql}{multiplier_sql} AS INT)"

    return diff_sql


def _json_format_sql(self: Hive.Generator, expression: exp.JSONFormat) -> str:
    this = expression.this

    if is_parse_json(this):
        if this.this.is_string:
            # Since FROM_JSON requires a nested type, we always wrap the json string with
            # an array to ensure that "naked" strings like "'a'" will be handled correctly
            wrapped_json = exp.Literal.string(f"[{this.this.name}]")

            from_json = self.func(
                "FROM_JSON", wrapped_json, self.func("SCHEMA_OF_JSON", wrapped_json)
            )
            to_json = self.func("TO_JSON", from_json)

            # This strips the [, ] delimiters of the dummy array printed by TO_JSON
            return self.func("REGEXP_EXTRACT", to_json, "'^.(.*).$'", "1")
        return self.sql(this)

    return self.func("TO_JSON", this, expression.args.get("options"))


def _array_sort_sql(self: Hive.Generator, expression: exp.ArraySort) -> str:
    if expression.expression:
        self.unsupported("Hive SORT_ARRAY does not support a comparator")
    return self.func("SORT_ARRAY", expression.this)


def _property_sql(self: Hive.Generator, expression: exp.Property) -> str:
    return f"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}"


def _str_to_unix_sql(self: Hive.Generator, expression: exp.StrToUnix) -> str:
    return self.func("UNIX_TIMESTAMP", expression.this, time_format("hive")(self, expression))


def _unix_to_time_sql(self: Hive.Generator, expression: exp.UnixToTime) -> str:
    timestamp = self.sql(expression, "this")
    scale = expression.args.get("scale")
    if scale in (None, exp.UnixToTime.SECONDS):
        return rename_func("FROM_UNIXTIME")(self, expression)

    return f"FROM_UNIXTIME({timestamp} / POW(10, {scale}))"


def _str_to_date_sql(self: Hive.Generator, expression: exp.StrToDate) -> str:
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT):
        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
    return f"CAST({this} AS DATE)"


def _str_to_time_sql(self: Hive.Generator, expression: exp.StrToTime) -> str:
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT):
        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
    return f"CAST({this} AS TIMESTAMP)"


def _to_date_sql(self: Hive.Generator, expression: exp.TsOrDsToDate) -> str:
    time_format = self.format_time(expression)
    if time_format and time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT):
        return self.func("TO_DATE", expression.this, time_format)

    if isinstance(expression.this, exp.TsOrDsToDate):
        return self.sql(expression, "this")

    return self.func("TO_DATE", expression.this)


def _build_with_ignore_nulls(
    exp_class: t.Type[exp.Expression],
) -> t.Callable[[t.List[exp.Expression]], exp.Expression]:
    def _parse(args: t.List[exp.Expression]) -> exp.Expression:
        this = exp_class(this=seq_get(args, 0))
        if seq_get(args, 1) == exp.true():
            return exp.IgnoreNulls(this=this)
        return this

    return _parse


class Hive(Dialect):
    ALIAS_POST_TABLESAMPLE = True
    IDENTIFIERS_CAN_START_WITH_DIGIT = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SAFE_DIVISION = True

    # https://spark.apache.org/docs/latest/sql-ref-identifier.html#description
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    TIME_MAPPING = {
        "y": "%Y",
        "Y": "%Y",
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "MMM": "%b",
        "MM": "%m",
        "M": "%-m",
        "dd": "%d",
        "d": "%-d",
        "HH": "%H",
        "H": "%-H",
        "hh": "%I",
        "h": "%-I",
        "mm": "%M",
        "m": "%-M",
        "ss": "%S",
        "s": "%-S",
        "SSSSSS": "%f",
        "a": "%p",
        "DD": "%j",
        "D": "%-j",
        "E": "%a",
        "EE": "%a",
        "EEE": "%a",
        "EEEE": "%A",
    }

    DATE_FORMAT = "'yyyy-MM-dd'"
    DATEINT_FORMAT = "'yyyyMMdd'"
    TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'"

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"']
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ADD ARCHIVE": TokenType.COMMAND,
            "ADD ARCHIVES": TokenType.COMMAND,
            "ADD FILE": TokenType.COMMAND,
            "ADD FILES": TokenType.COMMAND,
            "ADD JAR": TokenType.COMMAND,
            "ADD JARS": TokenType.COMMAND,
            "MSCK REPAIR": TokenType.COMMAND,
            "REFRESH": TokenType.REFRESH,
            "TIMESTAMP AS OF": TokenType.TIMESTAMP_SNAPSHOT,
            "VERSION AS OF": TokenType.VERSION_SNAPSHOT,
            "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES,
        }

        NUMERIC_LITERALS = {
            "L": "BIGINT",
            "S": "SMALLINT",
            "Y": "TINYINT",
            "D": "DOUBLE",
            "F": "FLOAT",
            "BD": "DECIMAL",
        }

    class Parser(parser.Parser):
        LOG_DEFAULTS_TO_LN = True
        STRICT_CAST = False
        VALUES_FOLLOWED_BY_PAREN = False

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "BASE64": exp.ToBase64.from_arg_list,
            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
            "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list,
            "DATE_ADD": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY")
            ),
            "DATE_FORMAT": lambda args: build_formatted_time(exp.TimeToStr, "hive")(
                [
                    exp.TimeStrToTime(this=seq_get(args, 0)),
                    seq_get(args, 1),
                ]
            ),
            "DATE_SUB": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)),
                unit=exp.Literal.string("DAY"),
            ),
            "DATEDIFF": lambda args: exp.DateDiff(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
            ),
            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
            "FIRST": _build_with_ignore_nulls(exp.First),
            "FIRST_VALUE": _build_with_ignore_nulls(exp.FirstValue),
            "FROM_UNIXTIME": build_formatted_time(exp.UnixToStr, "hive", True),
            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
            "LAST": _build_with_ignore_nulls(exp.Last),
            "LAST_VALUE": _build_with_ignore_nulls(exp.LastValue),
            "LOCATE": locate_to_strposition,
            "MAP": parser.build_var_map,
            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
            "PERCENTILE": exp.Quantile.from_arg_list,
            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2)
            ),
            "SIZE": exp.ArraySize.from_arg_list,
            "SPLIT": exp.RegexpSplit.from_arg_list,
            "STR_TO_MAP": lambda args: exp.StrToMap(
                this=seq_get(args, 0),
                pair_delim=seq_get(args, 1) or exp.Literal.string(","),
                key_value_delim=seq_get(args, 2) or exp.Literal.string(":"),
            ),
            "TO_DATE": build_formatted_time(exp.TsOrDsToDate, "hive"),
            "TO_JSON": exp.JSONFormat.from_arg_list,
            "UNBASE64": exp.FromBase64.from_arg_list,
            "UNIX_TIMESTAMP": build_formatted_time(exp.StrToUnix, "hive", True),
            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
        }

        NO_PAREN_FUNCTION_PARSERS = {
            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
            "TRANSFORM": lambda self: self._parse_transform(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
                expressions=self._parse_wrapped_csv(self._parse_property)
            ),
        }

        def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]:
            if not self._match(TokenType.L_PAREN, advance=False):
                self._retreat(self._index - 1)
                return None

            args = self._parse_wrapped_csv(self._parse_lambda)
            row_format_before = self._parse_row_format(match_row=True)

            record_writer = None
            if self._match_text_seq("RECORDWRITER"):
                record_writer = self._parse_string()

            if not self._match(TokenType.USING):
                return exp.Transform.from_arg_list(args)

            command_script = self._parse_string()

            self._match(TokenType.ALIAS)
            schema = self._parse_schema()

            row_format_after = self._parse_row_format(match_row=True)
            record_reader = None
            if self._match_text_seq("RECORDREADER"):
                record_reader = self._parse_string()

            return self.expression(
                exp.QueryTransform,
                expressions=args,
                command_script=command_script,
                schema=schema,
                row_format_before=row_format_before,
                record_writer=record_writer,
                row_format_after=row_format_after,
                record_reader=record_reader,
            )

        def _parse_types(
            self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True
        ) -> t.Optional[exp.Expression]:
            """
            Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to
            STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0:

                spark-sql (default)> select cast(1234 as varchar(2));
                23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support
                char/varchar type and simply treats them as string type. Please use string type
                directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString
                to true, so that Spark treat them as string type as same as Spark 3.0 and earlier

                1234
                Time taken: 4.265 seconds, Fetched 1 row(s)

            This shows that Spark doesn't truncate the value into '12', which is inconsistent with
            what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly.

            Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
            """
            this = super()._parse_types(
                check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
            )

            if this and not schema:
                return this.transform(
                    lambda node: (
                        node.replace(exp.DataType.build("text"))
                        if isinstance(node, exp.DataType) and node.is_type("char", "varchar")
                        else node
                    ),
                    copy=False,
                )

            return this

        def _parse_partition_and_order(
            self,
        ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]:
            return (
                (
                    self._parse_csv(self._parse_conjunction)
                    if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY})
                    else []
                ),
                super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)),
            )

    class Generator(generator.Generator):
        LIMIT_FETCH = "LIMIT"
        TABLESAMPLE_WITH_METHOD = False
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        INDEX_ON = "ON TABLE"
        EXTRACT_ALLOWS_QUOTES = False
        NVL2_SUPPORTED = False
        LAST_DAY_SUPPORTS_DATE_PART = False
        JSON_PATH_SINGLE_QUOTE_ESCAPE = True

        EXPRESSIONS_WITHOUT_NESTED_CTES = {
            exp.Insert,
            exp.Select,
            exp.Subquery,
            exp.Union,
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
            exp.JSONPathWildcard,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIT: "BOOLEAN",
            exp.DataType.Type.DATETIME: "TIMESTAMP",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIME: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "BINARY",
        }

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.Group: transforms.preprocess([transforms.unalias_group]),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_qualify,
                    transforms.eliminate_distinct_on,
                    transforms.unnest_to_explode,
                ]
            ),
            exp.Property: _property_sql,
            exp.AnyValue: rename_func("FIRST"),
            exp.ApproxDistinct: approx_count_distinct_sql,
            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
            exp.ArrayConcat: rename_func("CONCAT"),
            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
            exp.ArraySize: rename_func("SIZE"),
            exp.ArraySort: _array_sort_sql,
            exp.With: no_recursive_cte_sql,
            exp.DateAdd: _add_date_sql,
            exp.DateDiff: _date_diff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: _add_date_sql,
            exp.DateToDi: lambda self,
            e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)",
            exp.DiToDate: lambda self,
            e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})",
            exp.FileFormatProperty: lambda self,
            e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
            exp.FromBase64: rename_func("UNBASE64"),
            exp.If: if_sql(),
            exp.ILike: no_ilike_sql,
            exp.IsNan: rename_func("ISNAN"),
            exp.JSONExtract: lambda self, e: self.func("GET_JSON_OBJECT", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "GET_JSON_OBJECT", e.this, e.expression
            ),
            exp.JSONFormat: _json_format_sql,
            exp.Left: left_to_substring_sql,
            exp.Map: var_map_sql,
            exp.Max: max_or_greatest,
            exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)),
            exp.Min: min_or_least,
            exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression),
            exp.NotNullColumnConstraint: lambda _, e: (
                "" if e.args.get("allow_null") else "NOT NULL"
            ),
            exp.VarMap: var_map_sql,
            exp.Create: preprocess(
                [
                    remove_unique_constraints,
                    ctas_with_tmp_tables_to_create_tmp_view,
                    move_schema_columns_to_partitioned_by,
                ]
            ),
            exp.Quantile: rename_func("PERCENTILE"),
            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
            exp.RegexpExtract: regexp_extract_sql,
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
            exp.RegexpSplit: rename_func("SPLIT"),
            exp.Right: right_to_substring_sql,
            exp.SafeDivide: no_safe_divide_sql,
            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
            exp.ArrayUniqueAgg: rename_func("COLLECT_SET"),
            exp.Split: lambda self, e: self.func(
                "SPLIT", e.this, self.func("CONCAT", "'\\\\Q'", e.expression)
            ),
            exp.StrPosition: strposition_to_locate_sql,
            exp.StrToDate: _str_to_date_sql,
            exp.StrToTime: _str_to_time_sql,
            exp.StrToUnix: _str_to_unix_sql,
            exp.StructExtract: struct_extract_sql,
            exp.TimeStrToDate: rename_func("TO_DATE"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
            exp.TimeToStr: lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)),
            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
            exp.ToBase64: rename_func("BASE64"),
            exp.TsOrDiToDi: lambda self,
            e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
            exp.TsOrDsAdd: _add_date_sql,
            exp.TsOrDsDiff: _date_diff_sql,
            exp.TsOrDsToDate: _to_date_sql,
            exp.TryCast: no_trycast_sql,
            exp.UnixToStr: lambda self, e: self.func(
                "FROM_UNIXTIME", e.this, time_format("hive")(self, e)
            ),
            exp.UnixToTime: _unix_to_time_sql,
            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
            exp.National: lambda self, e: self.national_sql(e, prefix=""),
            exp.ClusteredColumnConstraint: lambda self,
            e: f"({self.expressions(e, 'this', indent=False)})",
            exp.NonClusteredColumnConstraint: lambda self,
            e: f"({self.expressions(e, 'this', indent=False)})",
            exp.NotForReplicationColumnConstraint: lambda *_: "",
            exp.OnProperty: lambda *_: "",
            exp.PrimaryKeyColumnConstraint: lambda *_: "PRIMARY KEY",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
            exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
            if isinstance(expression.this, exp.JSONPathWildcard):
                self.unsupported("Unsupported wildcard in JSONPathKey expression")
                return ""

            return super()._jsonpathkey_sql(expression)

        def parameter_sql(self, expression: exp.Parameter) -> str:
            this = self.sql(expression, "this")
            expression_sql = self.sql(expression, "expression")

            parent = expression.parent
            this = f"{this}:{expression_sql}" if expression_sql else this

            if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem):
                # We need to produce SET key = value instead of SET ${key} = value
                return this

            return f"${{{this}}}"

        def schema_sql(self, expression: exp.Schema) -> str:
            for ordered in expression.find_all(exp.Ordered):
                if ordered.args.get("desc") is False:
                    ordered.set("desc", None)

            return super().schema_sql(expression)

        def constraint_sql(self, expression: exp.Constraint) -> str:
            for prop in list(expression.find_all(exp.Properties)):
                prop.pop()

            this = self.sql(expression, "this")
            expressions = self.expressions(expression, sep=" ", flat=True)
            return f"CONSTRAINT {this} {expressions}"

        def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str:
            serde_props = self.sql(expression, "serde_properties")
            serde_props = f" {serde_props}" if serde_props else ""
            return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}"

        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
            return self.func(
                "COLLECT_LIST",
                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
            )

        def with_properties(self, properties: exp.Properties) -> str:
            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))

        def datatype_sql(self, expression: exp.DataType) -> str:
            if expression.this in self.PARAMETERIZABLE_TEXT_TYPES and (
                not expression.expressions or expression.expressions[0].name == "MAX"
            ):
                expression = exp.DataType.build("text")
            elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions:
                expression.set("this", exp.DataType.Type.VARCHAR)
            elif expression.this in exp.DataType.TEMPORAL_TYPES:
                expression = exp.DataType.build(expression.this)
            elif expression.is_type("float"):
                size_expression = expression.find(exp.DataTypeParam)
                if size_expression:
                    size = int(size_expression.name)
                    expression = (
                        exp.DataType.build("float") if size <= 32 else exp.DataType.build("double")
                    )

            return super().datatype_sql(expression)

        def version_sql(self, expression: exp.Version) -> str:
            sql = super().version_sql(expression)
            return sql.replace("FOR ", "", 1)

        def struct_sql(self, expression: exp.Struct) -> str:
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    self.unsupported("Hive does not support named structs.")
                    values.append(e.expression)
                else:
                    values.append(e)

            return self.func("STRUCT", *values)
class Hive(sqlglot.dialects.dialect.Dialect):
ALIAS_POST_TABLESAMPLE = True

Whether the table alias comes after tablesample.

IDENTIFIERS_CAN_START_WITH_DIGIT = True

Whether an unquoted identifier can start with a digit.

SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SAFE_DIVISION = True

Whether division by zero throws an error (False) or returns NULL (True).

NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

Specifies the strategy according to which identifiers should be normalized.
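For example (a sketch that is not part of the generated documentation and assumes the sqlglot optimizer helpers are available), a case-insensitive strategy means unquoted identifiers are lowercased when normalized:

from sqlglot import parse_one
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers

# Unquoted identifiers are lowercased under NormalizationStrategy.CASE_INSENSITIVE.
expression = normalize_identifiers(parse_one("SELECT Col FROM Tbl", read="hive"), dialect="hive")
print(expression.sql(dialect="hive"))  # expected: SELECT col FROM tbl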

TIME_MAPPING: Dict[str, str] = {'y': '%Y', 'Y': '%Y', 'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'MMM': '%b', 'MM': '%m', 'M': '%-m', 'dd': '%d', 'd': '%-d', 'HH': '%H', 'H': '%-H', 'hh': '%I', 'h': '%-I', 'mm': '%M', 'm': '%-M', 'ss': '%S', 's': '%-S', 'SSSSSS': '%f', 'a': '%p', 'DD': '%j', 'D': '%-j', 'E': '%a', 'EE': '%a', 'EEE': '%a', 'EEEE': '%A'}

Associates this dialect's time formats with their equivalent Python strftime formats.
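A short sketch of what this mapping is used for (illustrative, not part of the generated documentation; the rendered pattern depends on the target dialect and sqlglot version): Hive/Spark format tokens such as yyyy, MM and dd are translated to their strftime equivalents when transpiling to a strftime-based dialect.

import sqlglot

# DATE_FORMAT is parsed into exp.TimeToStr, and its 'yyyy-MM-dd HH:mm:ss' pattern
# is converted through TIME_MAPPING when generating SQL for a strftime-based
# dialect such as DuckDB.
print(sqlglot.transpile("SELECT DATE_FORMAT(ts, 'yyyy-MM-dd HH:mm:ss')", read="hive", write="duckdb")[0])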

DATE_FORMAT = "'yyyy-MM-dd'"
DATEINT_FORMAT = "'yyyyMMdd'"
TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'"
'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ADD ARCHIVE': <TokenType.COMMAND: 'COMMAND'>, 'ADD ARCHIVES': <TokenType.COMMAND: 'COMMAND'>, 'ADD FILE': <TokenType.COMMAND: 'COMMAND'>, 'ADD FILES': <TokenType.COMMAND: 'COMMAND'>, 'ADD JAR': <TokenType.COMMAND: 'COMMAND'>, 'ADD JARS': <TokenType.COMMAND: 'COMMAND'>, 'MSCK REPAIR': <TokenType.COMMAND: 'COMMAND'>, 'REFRESH': <TokenType.REFRESH: 'REFRESH'>, 'TIMESTAMP AS OF': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'VERSION AS OF': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'WITH SERDEPROPERTIES': <TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>}
NUMERIC_LITERALS = {'L': 'BIGINT', 'S': 'SMALLINT', 'Y': 'TINYINT', 'D': 'DOUBLE', 'F': 'FLOAT', 'BD': 'DECIMAL'}
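NUMERIC_LITERALS covers Hive's suffixed numeric literals (10L, 2S, 1Y, 3.5BD, ...). A minimal sketch of how this surfaces through the public API, assuming sqlglot is installed; the query itself is illustrative:

    import sqlglot

    # Per NUMERIC_LITERALS above, the tokenizer maps the "L" suffix to BIGINT
    # and "BD" to DECIMAL, so the suffixed literals parse as typed (cast)
    # expressions rather than staying raw text.
    ast = sqlglot.parse_one("SELECT 10L, 1.5BD", read="hive")
    print(repr(ast))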
class Hive.Parser(sqlglot.parser.Parser):
270    class Parser(parser.Parser):
271        LOG_DEFAULTS_TO_LN = True
272        STRICT_CAST = False
273        VALUES_FOLLOWED_BY_PAREN = False
274
275        FUNCTIONS = {
276            **parser.Parser.FUNCTIONS,
277            "BASE64": exp.ToBase64.from_arg_list,
278            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
279            "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list,
280            "DATE_ADD": lambda args: exp.TsOrDsAdd(
281                this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY")
282            ),
283            "DATE_FORMAT": lambda args: build_formatted_time(exp.TimeToStr, "hive")(
284                [
285                    exp.TimeStrToTime(this=seq_get(args, 0)),
286                    seq_get(args, 1),
287                ]
288            ),
289            "DATE_SUB": lambda args: exp.TsOrDsAdd(
290                this=seq_get(args, 0),
291                expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)),
292                unit=exp.Literal.string("DAY"),
293            ),
294            "DATEDIFF": lambda args: exp.DateDiff(
295                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
296                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
297            ),
298            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
299            "FIRST": _build_with_ignore_nulls(exp.First),
300            "FIRST_VALUE": _build_with_ignore_nulls(exp.FirstValue),
301            "FROM_UNIXTIME": build_formatted_time(exp.UnixToStr, "hive", True),
302            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
303            "LAST": _build_with_ignore_nulls(exp.Last),
304            "LAST_VALUE": _build_with_ignore_nulls(exp.LastValue),
305            "LOCATE": locate_to_strposition,
306            "MAP": parser.build_var_map,
307            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
308            "PERCENTILE": exp.Quantile.from_arg_list,
309            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
310            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
311                this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2)
312            ),
313            "SIZE": exp.ArraySize.from_arg_list,
314            "SPLIT": exp.RegexpSplit.from_arg_list,
315            "STR_TO_MAP": lambda args: exp.StrToMap(
316                this=seq_get(args, 0),
317                pair_delim=seq_get(args, 1) or exp.Literal.string(","),
318                key_value_delim=seq_get(args, 2) or exp.Literal.string(":"),
319            ),
320            "TO_DATE": build_formatted_time(exp.TsOrDsToDate, "hive"),
321            "TO_JSON": exp.JSONFormat.from_arg_list,
322            "UNBASE64": exp.FromBase64.from_arg_list,
323            "UNIX_TIMESTAMP": build_formatted_time(exp.StrToUnix, "hive", True),
324            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
325        }
326
327        NO_PAREN_FUNCTION_PARSERS = {
328            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
329            "TRANSFORM": lambda self: self._parse_transform(),
330        }
331
332        PROPERTY_PARSERS = {
333            **parser.Parser.PROPERTY_PARSERS,
334            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
335                expressions=self._parse_wrapped_csv(self._parse_property)
336            ),
337        }
338
339        def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]:
340            if not self._match(TokenType.L_PAREN, advance=False):
341                self._retreat(self._index - 1)
342                return None
343
344            args = self._parse_wrapped_csv(self._parse_lambda)
345            row_format_before = self._parse_row_format(match_row=True)
346
347            record_writer = None
348            if self._match_text_seq("RECORDWRITER"):
349                record_writer = self._parse_string()
350
351            if not self._match(TokenType.USING):
352                return exp.Transform.from_arg_list(args)
353
354            command_script = self._parse_string()
355
356            self._match(TokenType.ALIAS)
357            schema = self._parse_schema()
358
359            row_format_after = self._parse_row_format(match_row=True)
360            record_reader = None
361            if self._match_text_seq("RECORDREADER"):
362                record_reader = self._parse_string()
363
364            return self.expression(
365                exp.QueryTransform,
366                expressions=args,
367                command_script=command_script,
368                schema=schema,
369                row_format_before=row_format_before,
370                record_writer=record_writer,
371                row_format_after=row_format_after,
372                record_reader=record_reader,
373            )
374
375        def _parse_types(
376            self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True
377        ) -> t.Optional[exp.Expression]:
378            """
379            Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to
380            STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0:
381
382                spark-sql (default)> select cast(1234 as varchar(2));
383                23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support
384                char/varchar type and simply treats them as string type. Please use string type
385                directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString
386                to true, so that Spark treat them as string type as same as Spark 3.0 and earlier
387
388                1234
389                Time taken: 4.265 seconds, Fetched 1 row(s)
390
391            This shows that Spark doesn't truncate the value into '12', which is inconsistent with
392            what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly.
393
394            Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
395            """
396            this = super()._parse_types(
397                check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
398            )
399
400            if this and not schema:
401                return this.transform(
402                    lambda node: (
403                        node.replace(exp.DataType.build("text"))
404                        if isinstance(node, exp.DataType) and node.is_type("char", "varchar")
405                        else node
406                    ),
407                    copy=False,
408                )
409
410            return this
411
412        def _parse_partition_and_order(
413            self,
414        ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]:
415            return (
416                (
417                    self._parse_csv(self._parse_conjunction)
418                    if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY})
419                    else []
420                ),
421                super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)),
422            )

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
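A minimal usage sketch, assuming sqlglot is installed; the table and column names are illustrative. Per the FUNCTIONS overrides in the source above, Hive-specific functions are normalized into dialect-agnostic expression nodes:

    import sqlglot
    from sqlglot import exp

    # Parse a Hive query; GET_JSON_OBJECT is normalized to exp.JSONExtractScalar
    # and COLLECT_SET to exp.ArrayUniqueAgg.
    ast = sqlglot.parse_one(
        "SELECT GET_JSON_OBJECT(payload, '$.id'), COLLECT_SET(user_id) FROM events",
        read="hive",
    )

    print(ast.find(exp.JSONExtractScalar))  # the GET_JSON_OBJECT call as a generic node
    print(ast.find(exp.ArrayUniqueAgg))     # the COLLECT_SET call as a generic node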
LOG_DEFAULTS_TO_LN = True
STRICT_CAST = False
VALUES_FOLLOWED_BY_PAREN = False
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Date'>>, 'DATE_ADD': <function Hive.Parser.<lambda>>, 'DATEDIFF': <function Hive.Parser.<lambda>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function Hive.Parser.<lambda>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateTrunc'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <function Hive.Parser.<lambda>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <function _build_with_ignore_nulls.<locals>._parse>, 'FIRST_VALUE': <function _build_with_ignore_nulls.<locals>._parse>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Flatten'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <function _build_with_ignore_nulls.<locals>._parse>, 'LAST_DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <function _build_with_ignore_nulls.<locals>._parse>, 
'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <function build_var_map>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <function Hive.Parser.<lambda>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function Hive.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <function Hive.Parser.<lambda>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMPFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 
'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <function Hive.Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'COLLECT_LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'COLLECT_SET': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'DATE_FORMAT': <function Hive.Parser.<lambda>>, 'FROM_UNIXTIME': <function build_formatted_time.<locals>._builder>, 'GET_JSON_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'LOCATE': <function locate_to_strposition>, 'PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'PERCENTILE_APPROX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'TO_DATE': <function build_formatted_time.<locals>._builder>, 'TO_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'UNBASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'UNIX_TIMESTAMP': <function build_formatted_time.<locals>._builder>}
NO_PAREN_FUNCTION_PARSERS = {'ANY': <function Parser.<lambda>>, 'CASE': <function Parser.<lambda>>, 'IF': <function Parser.<lambda>>, 'NEXT': <function Parser.<lambda>>, 'TRANSFORM': <function Hive.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'WITH SERDEPROPERTIES': <function Hive.Parser.<lambda>>}
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ID_VAR_TOKENS
INTERVAL_VARS
TABLE_ALIAS_TOKENS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
TIMESTAMPS
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
LAMBDAS
COLUMN_OPERATORS
EXPRESSION_PARSERS
STATEMENT_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
RANGE_PARSERS
CONSTRAINT_PARSERS
ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
FUNCTION_PARSERS
QUERY_MODIFIER_PARSERS
SET_PARSERS
SHOW_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
USABLES
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
PREFIXED_PIVOT_COLUMNS
IDENTIFY_PIVOT_STRINGS
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_UNION
UNION_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
SUPPORTS_IMPLICIT_UNNEST
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Hive.Generator(sqlglot.generator.Generator):
424    class Generator(generator.Generator):
425        LIMIT_FETCH = "LIMIT"
426        TABLESAMPLE_WITH_METHOD = False
427        JOIN_HINTS = False
428        TABLE_HINTS = False
429        QUERY_HINTS = False
430        INDEX_ON = "ON TABLE"
431        EXTRACT_ALLOWS_QUOTES = False
432        NVL2_SUPPORTED = False
433        LAST_DAY_SUPPORTS_DATE_PART = False
434        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
435
436        EXPRESSIONS_WITHOUT_NESTED_CTES = {
437            exp.Insert,
438            exp.Select,
439            exp.Subquery,
440            exp.Union,
441        }
442
443        SUPPORTED_JSON_PATH_PARTS = {
444            exp.JSONPathKey,
445            exp.JSONPathRoot,
446            exp.JSONPathSubscript,
447            exp.JSONPathWildcard,
448        }
449
450        TYPE_MAPPING = {
451            **generator.Generator.TYPE_MAPPING,
452            exp.DataType.Type.BIT: "BOOLEAN",
453            exp.DataType.Type.DATETIME: "TIMESTAMP",
454            exp.DataType.Type.TEXT: "STRING",
455            exp.DataType.Type.TIME: "TIMESTAMP",
456            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
457            exp.DataType.Type.VARBINARY: "BINARY",
458        }
459
460        TRANSFORMS = {
461            **generator.Generator.TRANSFORMS,
462            exp.Group: transforms.preprocess([transforms.unalias_group]),
463            exp.Select: transforms.preprocess(
464                [
465                    transforms.eliminate_qualify,
466                    transforms.eliminate_distinct_on,
467                    transforms.unnest_to_explode,
468                ]
469            ),
470            exp.Property: _property_sql,
471            exp.AnyValue: rename_func("FIRST"),
472            exp.ApproxDistinct: approx_count_distinct_sql,
473            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
474            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
475            exp.ArrayConcat: rename_func("CONCAT"),
476            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
477            exp.ArraySize: rename_func("SIZE"),
478            exp.ArraySort: _array_sort_sql,
479            exp.With: no_recursive_cte_sql,
480            exp.DateAdd: _add_date_sql,
481            exp.DateDiff: _date_diff_sql,
482            exp.DateStrToDate: datestrtodate_sql,
483            exp.DateSub: _add_date_sql,
484            exp.DateToDi: lambda self,
485            e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)",
486            exp.DiToDate: lambda self,
487            e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})",
488            exp.FileFormatProperty: lambda self,
489            e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
490            exp.FromBase64: rename_func("UNBASE64"),
491            exp.If: if_sql(),
492            exp.ILike: no_ilike_sql,
493            exp.IsNan: rename_func("ISNAN"),
494            exp.JSONExtract: lambda self, e: self.func("GET_JSON_OBJECT", e.this, e.expression),
495            exp.JSONExtractScalar: lambda self, e: self.func(
496                "GET_JSON_OBJECT", e.this, e.expression
497            ),
498            exp.JSONFormat: _json_format_sql,
499            exp.Left: left_to_substring_sql,
500            exp.Map: var_map_sql,
501            exp.Max: max_or_greatest,
502            exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)),
503            exp.Min: min_or_least,
504            exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression),
505            exp.NotNullColumnConstraint: lambda _, e: (
506                "" if e.args.get("allow_null") else "NOT NULL"
507            ),
508            exp.VarMap: var_map_sql,
509            exp.Create: preprocess(
510                [
511                    remove_unique_constraints,
512                    ctas_with_tmp_tables_to_create_tmp_view,
513                    move_schema_columns_to_partitioned_by,
514                ]
515            ),
516            exp.Quantile: rename_func("PERCENTILE"),
517            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
518            exp.RegexpExtract: regexp_extract_sql,
519            exp.RegexpReplace: regexp_replace_sql,
520            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
521            exp.RegexpSplit: rename_func("SPLIT"),
522            exp.Right: right_to_substring_sql,
523            exp.SafeDivide: no_safe_divide_sql,
524            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
525            exp.ArrayUniqueAgg: rename_func("COLLECT_SET"),
526            exp.Split: lambda self, e: self.func(
527                "SPLIT", e.this, self.func("CONCAT", "'\\\\Q'", e.expression)
528            ),
529            exp.StrPosition: strposition_to_locate_sql,
530            exp.StrToDate: _str_to_date_sql,
531            exp.StrToTime: _str_to_time_sql,
532            exp.StrToUnix: _str_to_unix_sql,
533            exp.StructExtract: struct_extract_sql,
534            exp.TimeStrToDate: rename_func("TO_DATE"),
535            exp.TimeStrToTime: timestrtotime_sql,
536            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
537            exp.TimeToStr: lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)),
538            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
539            exp.ToBase64: rename_func("BASE64"),
540            exp.TsOrDiToDi: lambda self,
541            e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
542            exp.TsOrDsAdd: _add_date_sql,
543            exp.TsOrDsDiff: _date_diff_sql,
544            exp.TsOrDsToDate: _to_date_sql,
545            exp.TryCast: no_trycast_sql,
546            exp.UnixToStr: lambda self, e: self.func(
547                "FROM_UNIXTIME", e.this, time_format("hive")(self, e)
548            ),
549            exp.UnixToTime: _unix_to_time_sql,
550            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
551            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
552            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
553            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
554            exp.National: lambda self, e: self.national_sql(e, prefix=""),
555            exp.ClusteredColumnConstraint: lambda self,
556            e: f"({self.expressions(e, 'this', indent=False)})",
557            exp.NonClusteredColumnConstraint: lambda self,
558            e: f"({self.expressions(e, 'this', indent=False)})",
559            exp.NotForReplicationColumnConstraint: lambda *_: "",
560            exp.OnProperty: lambda *_: "",
561            exp.PrimaryKeyColumnConstraint: lambda *_: "PRIMARY KEY",
562        }
563
564        PROPERTIES_LOCATION = {
565            **generator.Generator.PROPERTIES_LOCATION,
566            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
567            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
568            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
569            exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED,
570        }
571
572        def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
573            if isinstance(expression.this, exp.JSONPathWildcard):
574                self.unsupported("Unsupported wildcard in JSONPathKey expression")
575                return ""
576
577            return super()._jsonpathkey_sql(expression)
578
579        def parameter_sql(self, expression: exp.Parameter) -> str:
580            this = self.sql(expression, "this")
581            expression_sql = self.sql(expression, "expression")
582
583            parent = expression.parent
584            this = f"{this}:{expression_sql}" if expression_sql else this
585
586            if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem):
587                # We need to produce SET key = value instead of SET ${key} = value
588                return this
589
590            return f"${{{this}}}"
591
592        def schema_sql(self, expression: exp.Schema) -> str:
593            for ordered in expression.find_all(exp.Ordered):
594                if ordered.args.get("desc") is False:
595                    ordered.set("desc", None)
596
597            return super().schema_sql(expression)
598
599        def constraint_sql(self, expression: exp.Constraint) -> str:
600            for prop in list(expression.find_all(exp.Properties)):
601                prop.pop()
602
603            this = self.sql(expression, "this")
604            expressions = self.expressions(expression, sep=" ", flat=True)
605            return f"CONSTRAINT {this} {expressions}"
606
607        def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str:
608            serde_props = self.sql(expression, "serde_properties")
609            serde_props = f" {serde_props}" if serde_props else ""
610            return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}"
611
612        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
613            return self.func(
614                "COLLECT_LIST",
615                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
616            )
617
618        def with_properties(self, properties: exp.Properties) -> str:
619            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
620
621        def datatype_sql(self, expression: exp.DataType) -> str:
622            if expression.this in self.PARAMETERIZABLE_TEXT_TYPES and (
623                not expression.expressions or expression.expressions[0].name == "MAX"
624            ):
625                expression = exp.DataType.build("text")
626            elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions:
627                expression.set("this", exp.DataType.Type.VARCHAR)
628            elif expression.this in exp.DataType.TEMPORAL_TYPES:
629                expression = exp.DataType.build(expression.this)
630            elif expression.is_type("float"):
631                size_expression = expression.find(exp.DataTypeParam)
632                if size_expression:
633                    size = int(size_expression.name)
634                    expression = (
635                        exp.DataType.build("float") if size <= 32 else exp.DataType.build("double")
636                    )
637
638            return super().datatype_sql(expression)
639
640        def version_sql(self, expression: exp.Version) -> str:
641            sql = super().version_sql(expression)
642            return sql.replace("FOR ", "", 1)
643
644        def struct_sql(self, expression: exp.Struct) -> str:
645            values = []
646
647            for i, e in enumerate(expression.expressions):
648                if isinstance(e, exp.PropertyEQ):
649                    self.unsupported("Hive does not support named structs.")
650                    values.append(e.expression)
651                else:
652                    values.append(e)
653
654            return self.func("STRUCT", *values)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): never quote, except where the dialect makes quoting mandatory; True or 'always': always quote; 'safe': only quote identifiers that are case-insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. Default: 2.
  • indent: The indentation size in a formatted string. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): convert names to uppercase; "lower": convert names to lowercase; False: disable function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. Only relevant when unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. Only relevant when generating in pretty mode. Default: False.
  • max_text_width: The maximum number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80.
  • comments: Whether to preserve comments in the output SQL code. Default: True.
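A minimal usage sketch (not taken from the source; the SQL string and option values are illustrative): these options are normally supplied through the top-level API, which forwards them to the Hive generator.

    import sqlglot

    # Generator options such as `pretty` and `identify` can be passed through
    # sqlglot.transpile, which hands them to the Hive generator.
    sql = "SELECT col_a, col_b FROM some_table WHERE col_a > 1"
    print(sqlglot.transpile(sql, write="hive", pretty=True, identify=True)[0])
    # Expected (roughly): a formatted query with backtick-quoted identifiers.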
LIMIT_FETCH = 'LIMIT'
TABLESAMPLE_WITH_METHOD = False
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
INDEX_ON = 'ON TABLE'
EXTRACT_ALLOWS_QUOTES = False
NVL2_SUPPORTED = False
LAST_DAY_SUPPORTS_DATE_PART = False
JSON_PATH_SINGLE_QUOTE_ESCAPE = True
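The JSON path settings above feed Hive's GET_JSON_OBJECT-based extraction. A rough sketch (the input SQL is illustrative, and the exact output may differ slightly by sqlglot version):

    import sqlglot

    # JSON extraction is rendered through GET_JSON_OBJECT with a single-quoted path;
    # a wildcard path key would instead trigger the "unsupported" warning shown in
    # _jsonpathkey_sql above.
    print(sqlglot.transpile("SELECT JSON_EXTRACT(x, '$.name') FROM t", read="mysql", write="hive")[0])
    # Expected (roughly): SELECT GET_JSON_OBJECT(x, '$.name') FROM t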
EXPRESSIONS_WITHOUT_NESTED_CTES = {<class 'sqlglot.expressions.Insert'>, <class 'sqlglot.expressions.Subquery'>, <class 'sqlglot.expressions.Union'>, <class 'sqlglot.expressions.Select'>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIT: 'BIT'>: 'BOOLEAN', <Type.DATETIME: 'DATETIME'>: 'TIMESTAMP', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIME: 'TIME'>: 'TIMESTAMP', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.VARBINARY: 'VARBINARY'>: 'BINARY'}
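TYPE_MAPPING is what turns generic types into their Hive spellings, e.g. TEXT into STRING. A small sketch (the input SQL is illustrative):

    import sqlglot

    # TEXT is remapped to STRING (and DATETIME to TIMESTAMP, etc.) for Hive.
    print(sqlglot.transpile("SELECT CAST(x AS TEXT) FROM t", write="hive")[0])
    # Expected (roughly): SELECT CAST(x AS STRING) FROM t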
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathWildcard'>: <function <lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateAdd'>: <function _add_date_sql>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function var_map_sql>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Group'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Property'>: <function _property_sql>, <class 'sqlglot.expressions.AnyValue'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function approx_count_distinct_sql>, <class 'sqlglot.expressions.ArgMax'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.ArgMin'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayJoin'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySort'>: <function _array_sort_sql>, <class 'sqlglot.expressions.With'>: <function no_recursive_cte_sql>, <class 'sqlglot.expressions.DateDiff'>: <function _date_diff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function _add_date_sql>, <class 'sqlglot.expressions.DateToDi'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.DiToDate'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.FileFormatProperty'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.FromBase64'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IsNan'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function _json_format_sql>, <class 'sqlglot.expressions.Left'>: <function left_to_substring_sql>, <class 'sqlglot.expressions.Map'>: <function var_map_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5Digest'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.MonthsBetween'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.NotNullColumnConstraint'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Quantile'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ApproxQuantile'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function regexp_extract_sql>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpSplit'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Right'>: <function right_to_substring_sql>, <class 'sqlglot.expressions.SafeDivide'>: <function no_safe_divide_sql>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayUniqueAgg'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Split'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function strposition_to_locate_sql>, <class 'sqlglot.expressions.StrToDate'>: <function _str_to_date_sql>, <class 'sqlglot.expressions.StrToTime'>: <function _str_to_time_sql>, <class 'sqlglot.expressions.StrToUnix'>: <function _str_to_unix_sql>, <class 'sqlglot.expressions.StructExtract'>: <function struct_extract_sql>, <class 'sqlglot.expressions.TimeStrToDate'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeStrToUnix'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeToStr'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToBase64'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDiToDi'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _add_date_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function _date_diff_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function _to_date_sql>, <class 'sqlglot.expressions.TryCast'>: <function no_trycast_sql>, <class 'sqlglot.expressions.UnixToStr'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function _unix_to_time_sql>, <class 'sqlglot.expressions.UnixToTimeStr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.SerdeProperties'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.NumberToStr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.National'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.PrimaryKeyColumnConstraint'>: <function Hive.Generator.<lambda>>}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
def parameter_sql(self, expression: sqlglot.expressions.Parameter) -> str:
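A hand-built sketch of the two branches above (the nodes are constructed directly rather than parsed, so the construction itself is illustrative):

    from sqlglot import exp

    # Outside of a SET item, a parameter is wrapped as ${name}.
    param = exp.Parameter(this=exp.Var(this="hivevar"))
    print(param.sql(dialect="hive"))  # expected: ${hivevar}

    # Inside SET key = value, the wrapper is omitted so Hive sees a plain key.
    set_stmt = exp.Set(
        expressions=[exp.SetItem(this=exp.EQ(this=param.copy(), expression=exp.Literal.number(1)))]
    )
    print(set_stmt.sql(dialect="hive"))  # expected (roughly): SET hivevar = 1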
def schema_sql(self, expression: sqlglot.expressions.Schema) -> str:
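An AST-level sketch of the effect (the Schema node is built by hand for illustration): an explicit ascending marker inside a schema is dropped before rendering.

    from sqlglot import exp

    # The explicit ASC (desc=False) on an ordered column inside a Schema is cleared,
    # so the Hive output omits the sort direction.
    schema = exp.Schema(expressions=[exp.Ordered(this=exp.column("a"), desc=False)])
    print(schema.sql(dialect="hive"))  # expected: (a) rather than (a ASC)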
def constraint_sql(self, expression: sqlglot.expressions.Constraint) -> str:
def rowformatserdeproperty_sql(self, expression: sqlglot.expressions.RowFormatSerdeProperty) -> str:
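For example, a Hive CREATE TABLE that declares a SerDe is expected to round-trip through this method roughly as follows (the SerDe class and property are illustrative):

    import sqlglot

    ddl = (
        "CREATE TABLE logs (line STRING) "
        "ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' "
        "WITH SERDEPROPERTIES ('separatorChar'=',')"
    )
    # Expected to round-trip with the ROW FORMAT SERDE clause and its SERDEPROPERTIES preserved.
    print(sqlglot.transpile(ddl, read="hive", write="hive")[0])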
def arrayagg_sql(self, expression: sqlglot.expressions.ArrayAgg) -> str:
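For instance, an ARRAY_AGG coming from another dialect is expected to be rendered as COLLECT_LIST (sketch; the input SQL is illustrative):

    import sqlglot

    # ArrayAgg becomes Hive's COLLECT_LIST; an inner ORDER BY, if present,
    # is stripped by the method above.
    print(sqlglot.transpile("SELECT ARRAY_AGG(x) FROM t", read="postgres", write="hive")[0])
    # Expected (roughly): SELECT COLLECT_LIST(x) FROM t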
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
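Table-level properties are emitted under a TBLPROPERTIES clause; a rough round-trip sketch (the property key/value is illustrative):

    import sqlglot

    ddl = "CREATE TABLE t (a INT) TBLPROPERTIES ('transactional'='true')"
    # Expected to round-trip with the key/value pairs rendered after TBLPROPERTIES.
    print(sqlglot.transpile(ddl, read="hive", write="hive")[0])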
def datatype_sql(self, expression: sqlglot.expressions.DataType) -> str:
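A sketch of the branches above when translating T-SQL types (illustrative input; the exact output may vary slightly by version):

    import sqlglot

    # VARCHAR(MAX) collapses to STRING, and a FLOAT with precision > 32 becomes DOUBLE.
    ddl = "CREATE TABLE t (a VARCHAR(MAX), b FLOAT(53))"
    print(sqlglot.transpile(ddl, read="tsql", write="hive")[0])
    # Expected (roughly): CREATE TABLE t (a STRING, b DOUBLE)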
def version_sql(self, expression: sqlglot.expressions.Version) -> str:
def struct_sql(self, expression: sqlglot.expressions.Struct) -> str:
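A hand-built sketch of the warning branch (the node is constructed directly for illustration):

    from sqlglot import exp

    # A named struct field (PropertyEQ) is not representable in Hive, so only the
    # value survives and an "unsupported" warning is logged.
    struct = exp.Struct(
        expressions=[exp.PropertyEQ(this=exp.to_identifier("a"), expression=exp.Literal.number(1))]
    )
    print(struct.sql(dialect="hive"))  # expected: STRUCT(1)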
SELECT_KINDS: Tuple[str, ...] = ()
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_ONLY_LITERALS
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
TZ_TO_WITH_TIME_ZONE
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
AGGREGATE_FILTER_SUPPORTED
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
SUPPORTS_TABLE_COPY
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_SEED_KEYWORD
COLLATE_IS_FUNC
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
JSON_KEY_VALUE_PAIR_SEP
INSERT_OVERWRITE
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
CAN_IMPLEMENT_ARRAY_ANY
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
except_sql
except_op
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
intersect_op
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
queryoption_sql
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_columns_sql
star_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
currenttimestamp_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
renamecolumn_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
trycast_sql
log_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
arrayany_sql
generateseries_sql
partitionrange_sql
truncatetable_sql
convert_sql