sqlglot.dialects.redshift
from __future__ import annotations

import typing as t

from sqlglot import exp, transforms
from sqlglot.dialects.dialect import (
    NormalizationStrategy,
    array_concat_sql,
    concat_to_dpipe_sql,
    concat_ws_to_dpipe_sql,
    date_delta_sql,
    generatedasidentitycolumnconstraint_sql,
    json_extract_segments,
    no_tablesample_sql,
    rename_func,
    map_date_part,
)
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType
from sqlglot.parser import build_convert_timezone

if t.TYPE_CHECKING:
    from sqlglot._typing import E


def _build_date_delta(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
    """Return a builder for Redshift DATEADD / DATEDIFF style function calls.

    Redshift's argument order is (date_part, expression, this), so the
    positional args are remapped accordingly and the date part is normalized
    through `map_date_part`.
    """

    def _builder(args: t.List) -> E:
        expr = expr_type(
            this=seq_get(args, 2),
            expression=seq_get(args, 1),
            unit=map_date_part(seq_get(args, 0)),
        )
        # DATEADD produces a TIMESTAMP, so record that for downstream type inference
        if expr_type is exp.TsOrDsAdd:
            expr.set("return_type", exp.DataType.build("TIMESTAMP"))

        return expr

    return _builder


class Redshift(Postgres):
    # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    SUPPORTS_USER_DEFINED_TYPES = False
    INDEX_OFFSET = 0
    COPY_PARAMS_ARE_CSV = False
    HEX_LOWERCASE = True
    HAS_DISTINCT_ARRAY_CONSTRUCTORS = True
    COALESCE_COMPARISON_NON_STANDARD = True
    REGEXP_EXTRACT_POSITION_OVERFLOW_RETURNS_NULL = False
    ARRAY_FUNCS_PROPAGATES_NULLS = True

    # ref: https://docs.aws.amazon.com/redshift/latest/dg/r_FORMAT_strings.html
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    TIME_MAPPING = {**Postgres.TIME_MAPPING, "MON": "%b", "HH24": "%H", "HH": "%I"}

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.var("month"),
                return_type=exp.DataType.build("TIMESTAMP"),
            ),
            # When the source timezone is omitted, Redshift assumes UTC
            "CONVERT_TIMEZONE": lambda args: build_convert_timezone(args, "UTC"),
            "DATEADD": _build_date_delta(exp.TsOrDsAdd),
            "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
            "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
            "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
            "GETDATE": exp.CurrentTimestamp.from_arg_list,
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
            ),
            # The delimiter defaults to "," when not provided
            "SPLIT_TO_ARRAY": lambda args: exp.StringToArray(
                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string(",")
            ),
            "STRTOL": exp.FromBase.from_arg_list,
        }
        FUNCTIONS.pop("GET_BIT")

        NO_PAREN_FUNCTION_PARSERS = {
            **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
            "APPROXIMATE": lambda self: self._parse_approximate_count(),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
        }

        SUPPORTS_IMPLICIT_UNNEST = True

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
            consume_pipe: bool = False,
        ) -> t.Optional[exp.Expression]:
            # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
            unpivot = self._match(TokenType.UNPIVOT)
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
            )

            # Wrap the table in a Pivot node when an UNPIVOT keyword preceded it
            return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table

        def _parse_convert(
            self, strict: bool, safe: t.Optional[bool] = None
        ) -> t.Optional[exp.Expression]:
            """Parse CONVERT(type, expr) — the target type comes first — into a TryCast."""
            to = self._parse_types()
            self._match(TokenType.COMMA)
            this = self._parse_bitwise()
            return self.expression(exp.TryCast, this=this, to=to, safe=safe)

        def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
            """Parse APPROXIMATE COUNT(DISTINCT x) into ApproxDistinct, else backtrack."""
            index = self._index - 1
            func = self._parse_function()

            if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
                return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
            # Not the expected shape: rewind so other parsers can try this token stream
            self._retreat(index)
            return None

        def _parse_projections(self) -> t.Tuple[t.List[exp.Expression], t.List[exp.Expression]]:
            projections, _ = super()._parse_projections()
            # If parsing stopped right after an EXCLUDE keyword, step back so it can be consumed here
            if self._prev and self._prev.text.upper() == "EXCLUDE" and self._curr:
                self._retreat(self._index - 1)

            # EXCLUDE clause always comes at the end of the projection list and applies to it as a whole
            exclude = (
                self._parse_wrapped_csv(self._parse_expression, optional=True)
                if self._match_text_seq("EXCLUDE")
                else []
            )

            # The last projection may have been mis-parsed as `<expr> AS EXCLUDE`; unwrap the alias
            if (
                exclude
                and isinstance(expr := projections[-1], exp.Alias)
                and expr.alias.upper() == "EXCLUDE"
            ):
                projections[-1] = expr.this.pop()

            return projections, exclude

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "(+)": TokenType.JOIN_MARKER,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "MINUS": TokenType.EXCEPT,
            "SUPER": TokenType.SUPER,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
            "BINARY VARYING": TokenType.VARBINARY,
        }
        KEYWORDS.pop("VALUES")

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        QUERY_HINTS = False
        VALUES_AS_TABLE = False
        TZ_TO_WITH_TIME_ZONE = True
        NVL2_SUPPORTED = True
        LAST_DAY_SUPPORTS_DATE_PART = False
        CAN_IMPLEMENT_ARRAY_ANY = False
        MULTI_ARG_DISTINCT = True
        COPY_PARAMS_ARE_WRAPPED = False
        HEX_FUNC = "TO_HEX"
        PARSE_JSON_NAME = "JSON_PARSE"
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ALTER_SET_TYPE = "TYPE"
        SUPPORTS_DECODE_CASE = True
        SUPPORTS_BETWEEN_FLAGS = False
        LIMIT_FETCH = "LIMIT"
        STAR_EXCEPT = "EXCLUDE"
        STAR_EXCLUDE_REQUIRES_DERIVED_TABLE = False

        # Redshift doesn't have `WITH` as part of their with_properties so we remove it
        WITH_PROPERTIES_PREFIX = " "

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.BLOB: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.ROWVERSION: "VARBYTE",
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.ArrayConcat: array_concat_sql("ARRAY_CONCAT"),
            exp.Concat: concat_to_dpipe_sql,
            exp.ConcatWs: concat_ws_to_dpipe_sql,
            exp.ApproxDistinct: lambda self,
            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
            exp.CurrentTimestamp: lambda self, e: (
                "SYSDATE" if e.args.get("sysdate") else "GETDATE()"
            ),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.Explode: lambda self, e: self.explode_sql(e),
            exp.FarmFingerprint: rename_func("FARMFINGERPRINT64"),
            exp.FromBase: rename_func("STRTOL"),
            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.RegexpExtract: rename_func("REGEXP_SUBSTR"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_window_clause,
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.unqualify_unnest,
                    transforms.unnest_generate_date_array_using_recursive_cte,
                ]
            ),
            exp.SortKeyProperty: lambda self,
            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
            exp.StartsWith: lambda self,
            e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
            exp.StringToArray: rename_func("SPLIT_TO_ARRAY"),
            exp.TableSample: no_tablesample_sql,
            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: lambda self, e: self._unix_to_time_sql(e),
            exp.SHA2Digest: lambda self, e: self.func(
                "SHA2", e.this, e.args.get("length") or exp.Literal.number(256)
            ),
        }

        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
        TRANSFORMS.pop(exp.Pivot)

        # Postgres doesn't support JSON_PARSE, but Redshift does
        TRANSFORMS.pop(exp.ParseJSON)

        # Redshift supports these functions
        TRANSFORMS.pop(exp.AnyValue)
        TRANSFORMS.pop(exp.LastDay)
        TRANSFORMS.pop(exp.SHA2)

        # Postgres and Redshift have different semantics for Getbit
        TRANSFORMS.pop(exp.Getbit)

        # Postgres does not permit a double precision argument in ROUND; Redshift does
        TRANSFORMS.pop(exp.Round)

        # Identifiers matching these must be quoted when generated
        RESERVED_KEYWORDS = {
            "aes128",
            "aes256",
            "all",
            "allowoverwrite",
            "analyse",
            "analyze",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "authorization",
            "az64",
            "backup",
            "between",
            "binary",
            "blanksasnull",
            "both",
            "bytedict",
            "bzip2",
            "case",
            "cast",
            "check",
            "collate",
            "column",
            "constraint",
            "create",
            "credentials",
            "cross",
            "current_date",
            "current_time",
            "current_timestamp",
            "current_user",
            "current_user_id",
            "default",
            "deferrable",
            "deflate",
            "defrag",
            "delta",
            "delta32k",
            "desc",
            "disable",
            "distinct",
            "do",
            "else",
            "emptyasnull",
            "enable",
            "encode",
            "encrypt ",
            "encryption",
            "end",
            "except",
            "explicit",
            "false",
            "for",
            "foreign",
            "freeze",
            "from",
            "full",
            "globaldict256",
            "globaldict64k",
            "grant",
            "group",
            "gzip",
            "having",
            "identity",
            "ignore",
            "ilike",
            "in",
            "initially",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "isnull",
            "join",
            "leading",
            "left",
            "like",
            "limit",
            "localtime",
            "localtimestamp",
            "lun",
            "luns",
            "lzo",
            "lzop",
            "minus",
            "mostly16",
            "mostly32",
            "mostly8",
            "natural",
            "new",
            "not",
            "notnull",
            "null",
            "nulls",
            "off",
            "offline",
            "offset",
            "oid",
            "old",
            "on",
            "only",
            "open",
            "or",
            "order",
            "outer",
            "overlaps",
            "parallel",
            "partition",
            "percent",
            "permissions",
            "pivot",
            "placing",
            "primary",
            "raw",
            "readratio",
            "recover",
            "references",
            "rejectlog",
            "resort",
            "respect",
            "restore",
            "right",
            "select",
            "session_user",
            "similar",
            "snapshot",
            "some",
            "sysdate",
            "system",
            "table",
            "tag",
            "tdes",
            "text255",
            "text32k",
            "then",
            "timestamp",
            "to",
            "top",
            "trailing",
            "true",
            "truncatecolumns",
            "type",
            "union",
            "unique",
            "unnest",
            "unpivot",
            "user",
            "using",
            "verbose",
            "wallet",
            "when",
            "where",
            "with",
            "without",
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as its bare argument (optionally aliased).

            Only single-argument UNNESTs used in FROM/JOIN clauses are supported;
            anything else is reported via `unsupported` and dropped.
            """
            args = expression.expressions
            num_args = len(args)

            if num_args != 1:
                self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
                return ""

            if isinstance(expression.find_ancestor(exp.From, exp.Join, exp.Select), exp.Select):
                self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses")
                return ""

            arg = self.sql(seq_get(args, 0))

            alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
            return f"{arg} AS {alias}" if alias else arg

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            if expression.is_type(exp.DataType.Type.JSON):
                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
                return self.sql(expression, "this")

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
            `TEXT` to `VARCHAR`.
            """
            if expression.is_type("text"):
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")

                if not precision:
                    expression.append("expressions", exp.var("MAX"))

            return super().datatype_sql(expression)

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with TABLE PROPERTIES, LOCATION and FILE FORMAT clauses."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" TABLE PROPERTIES ({exprs})" if exprs else ""
            location = self.sql(expression, "location")
            location = f" LOCATION {location}" if location else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" FILE FORMAT {file_format}" if file_format else ""

            return f"SET{exprs}{location}{file_format}"

        def array_sql(self, expression: exp.Array) -> str:
            # ARRAY[...] and ARRAY(...) are distinct constructors in Redshift, so
            # the bracket notation is preserved when the parse recorded it
            if expression.args.get("bracket_notation"):
                return super().array_sql(expression)

            return rename_func("ARRAY")(self, expression)

        def explode_sql(self, expression: exp.Explode) -> str:
            self.unsupported("Unsupported EXPLODE() function")
            return ""

        def _unix_to_time_sql(self, expression: exp.UnixToTime) -> str:
            """Convert a unix timestamp to a TIMESTAMP via epoch + interval arithmetic."""
            scale = expression.args.get("scale")
            this = self.sql(expression.this)

            # Rescale sub-second precision values (millis/micros/etc.) down to seconds
            if scale is not None and scale != exp.UnixToTime.SECONDS and scale.is_int:
                this = f"({this} / POWER(10, {scale.to_py()}))"

            return f"(TIMESTAMP 'epoch' + {this} * INTERVAL '1 SECOND')"
class Redshift(Postgres):
    # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    SUPPORTS_USER_DEFINED_TYPES = False
    INDEX_OFFSET = 0
    COPY_PARAMS_ARE_CSV = False
    HEX_LOWERCASE = True
    HAS_DISTINCT_ARRAY_CONSTRUCTORS = True
    COALESCE_COMPARISON_NON_STANDARD = True
    REGEXP_EXTRACT_POSITION_OVERFLOW_RETURNS_NULL = False
    ARRAY_FUNCS_PROPAGATES_NULLS = True

    # ref: https://docs.aws.amazon.com/redshift/latest/dg/r_FORMAT_strings.html
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    TIME_MAPPING = {**Postgres.TIME_MAPPING, "MON": "%b", "HH24": "%H", "HH": "%I"}

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.var("month"),
                return_type=exp.DataType.build("TIMESTAMP"),
            ),
            # When the source timezone is omitted, Redshift assumes UTC
            "CONVERT_TIMEZONE": lambda args: build_convert_timezone(args, "UTC"),
            "DATEADD": _build_date_delta(exp.TsOrDsAdd),
            "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
            "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
            "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
            "GETDATE": exp.CurrentTimestamp.from_arg_list,
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
            ),
            # The delimiter defaults to "," when not provided
            "SPLIT_TO_ARRAY": lambda args: exp.StringToArray(
                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string(",")
            ),
            "STRTOL": exp.FromBase.from_arg_list,
        }
        FUNCTIONS.pop("GET_BIT")

        NO_PAREN_FUNCTION_PARSERS = {
            **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
            "APPROXIMATE": lambda self: self._parse_approximate_count(),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
        }

        SUPPORTS_IMPLICIT_UNNEST = True

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
            consume_pipe: bool = False,
        ) -> t.Optional[exp.Expression]:
            # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
            unpivot = self._match(TokenType.UNPIVOT)
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
            )

            # Wrap the table in a Pivot node when an UNPIVOT keyword preceded it
            return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table

        def _parse_convert(
            self, strict: bool, safe: t.Optional[bool] = None
        ) -> t.Optional[exp.Expression]:
            """Parse CONVERT(type, expr) — the target type comes first — into a TryCast."""
            to = self._parse_types()
            self._match(TokenType.COMMA)
            this = self._parse_bitwise()
            return self.expression(exp.TryCast, this=this, to=to, safe=safe)

        def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
            """Parse APPROXIMATE COUNT(DISTINCT x) into ApproxDistinct, else backtrack."""
            index = self._index - 1
            func = self._parse_function()

            if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
                return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
            # Not the expected shape: rewind so other parsers can try this token stream
            self._retreat(index)
            return None

        def _parse_projections(self) -> t.Tuple[t.List[exp.Expression], t.List[exp.Expression]]:
            projections, _ = super()._parse_projections()
            # If parsing stopped right after an EXCLUDE keyword, step back so it can be consumed here
            if self._prev and self._prev.text.upper() == "EXCLUDE" and self._curr:
                self._retreat(self._index - 1)

            # EXCLUDE clause always comes at the end of the projection list and applies to it as a whole
            exclude = (
                self._parse_wrapped_csv(self._parse_expression, optional=True)
                if self._match_text_seq("EXCLUDE")
                else []
            )

            # The last projection may have been mis-parsed as `<expr> AS EXCLUDE`; unwrap the alias
            if (
                exclude
                and isinstance(expr := projections[-1], exp.Alias)
                and expr.alias.upper() == "EXCLUDE"
            ):
                projections[-1] = expr.this.pop()

            return projections, exclude

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "(+)": TokenType.JOIN_MARKER,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "MINUS": TokenType.EXCEPT,
            "SUPER": TokenType.SUPER,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
            "BINARY VARYING": TokenType.VARBINARY,
        }
        KEYWORDS.pop("VALUES")

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        QUERY_HINTS = False
        VALUES_AS_TABLE = False
        TZ_TO_WITH_TIME_ZONE = True
        NVL2_SUPPORTED = True
        LAST_DAY_SUPPORTS_DATE_PART = False
        CAN_IMPLEMENT_ARRAY_ANY = False
        MULTI_ARG_DISTINCT = True
        COPY_PARAMS_ARE_WRAPPED = False
        HEX_FUNC = "TO_HEX"
        PARSE_JSON_NAME = "JSON_PARSE"
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ALTER_SET_TYPE = "TYPE"
        SUPPORTS_DECODE_CASE = True
        SUPPORTS_BETWEEN_FLAGS = False
        LIMIT_FETCH = "LIMIT"
        STAR_EXCEPT = "EXCLUDE"
        STAR_EXCLUDE_REQUIRES_DERIVED_TABLE = False

        # Redshift doesn't have `WITH` as part of their with_properties so we remove it
        WITH_PROPERTIES_PREFIX = " "

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.BLOB: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.ROWVERSION: "VARBYTE",
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.ArrayConcat: array_concat_sql("ARRAY_CONCAT"),
            exp.Concat: concat_to_dpipe_sql,
            exp.ConcatWs: concat_ws_to_dpipe_sql,
            exp.ApproxDistinct: lambda self,
            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
            exp.CurrentTimestamp: lambda self, e: (
                "SYSDATE" if e.args.get("sysdate") else "GETDATE()"
            ),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.Explode: lambda self, e: self.explode_sql(e),
            exp.FarmFingerprint: rename_func("FARMFINGERPRINT64"),
            exp.FromBase: rename_func("STRTOL"),
            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.RegexpExtract: rename_func("REGEXP_SUBSTR"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_window_clause,
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.unqualify_unnest,
                    transforms.unnest_generate_date_array_using_recursive_cte,
                ]
            ),
            exp.SortKeyProperty: lambda self,
            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
            exp.StartsWith: lambda self,
            e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
            exp.StringToArray: rename_func("SPLIT_TO_ARRAY"),
            exp.TableSample: no_tablesample_sql,
            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: lambda self, e: self._unix_to_time_sql(e),
            exp.SHA2Digest: lambda self, e: self.func(
                "SHA2", e.this, e.args.get("length") or exp.Literal.number(256)
            ),
        }

        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
        TRANSFORMS.pop(exp.Pivot)

        # Postgres doesn't support JSON_PARSE, but Redshift does
        TRANSFORMS.pop(exp.ParseJSON)

        # Redshift supports these functions
        TRANSFORMS.pop(exp.AnyValue)
        TRANSFORMS.pop(exp.LastDay)
        TRANSFORMS.pop(exp.SHA2)

        # Postgres and Redshift have different semantics for Getbit
        TRANSFORMS.pop(exp.Getbit)

        # Postgres does not permit a double precision argument in ROUND; Redshift does
        TRANSFORMS.pop(exp.Round)

        # Identifiers matching these must be quoted when generated
        RESERVED_KEYWORDS = {
            "aes128",
            "aes256",
            "all",
            "allowoverwrite",
            "analyse",
            "analyze",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "authorization",
            "az64",
            "backup",
            "between",
            "binary",
            "blanksasnull",
            "both",
            "bytedict",
            "bzip2",
            "case",
            "cast",
            "check",
            "collate",
            "column",
            "constraint",
            "create",
            "credentials",
            "cross",
            "current_date",
            "current_time",
            "current_timestamp",
            "current_user",
            "current_user_id",
            "default",
            "deferrable",
            "deflate",
            "defrag",
            "delta",
            "delta32k",
            "desc",
            "disable",
            "distinct",
            "do",
            "else",
            "emptyasnull",
            "enable",
            "encode",
            "encrypt ",
            "encryption",
            "end",
            "except",
            "explicit",
            "false",
            "for",
            "foreign",
            "freeze",
            "from",
            "full",
            "globaldict256",
            "globaldict64k",
            "grant",
            "group",
            "gzip",
            "having",
            "identity",
            "ignore",
            "ilike",
            "in",
            "initially",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "isnull",
            "join",
            "leading",
            "left",
            "like",
            "limit",
            "localtime",
            "localtimestamp",
            "lun",
            "luns",
            "lzo",
            "lzop",
            "minus",
            "mostly16",
            "mostly32",
            "mostly8",
            "natural",
            "new",
            "not",
            "notnull",
            "null",
            "nulls",
            "off",
            "offline",
            "offset",
            "oid",
            "old",
            "on",
            "only",
            "open",
            "or",
            "order",
            "outer",
            "overlaps",
            "parallel",
            "partition",
            "percent",
            "permissions",
            "pivot",
            "placing",
            "primary",
            "raw",
            "readratio",
            "recover",
            "references",
            "rejectlog",
            "resort",
            "respect",
            "restore",
            "right",
            "select",
            "session_user",
            "similar",
            "snapshot",
            "some",
            "sysdate",
            "system",
            "table",
            "tag",
            "tdes",
            "text255",
            "text32k",
            "then",
            "timestamp",
            "to",
            "top",
            "trailing",
            "true",
            "truncatecolumns",
            "type",
            "union",
            "unique",
            "unnest",
            "unpivot",
            "user",
            "using",
            "verbose",
            "wallet",
            "when",
            "where",
            "with",
            "without",
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as its bare argument (optionally aliased).

            Only single-argument UNNESTs used in FROM/JOIN clauses are supported;
            anything else is reported via `unsupported` and dropped.
            """
            args = expression.expressions
            num_args = len(args)

            if num_args != 1:
                self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
                return ""

            if isinstance(expression.find_ancestor(exp.From, exp.Join, exp.Select), exp.Select):
                self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses")
                return ""

            arg = self.sql(seq_get(args, 0))

            alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
            return f"{arg} AS {alias}" if alias else arg

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            if expression.is_type(exp.DataType.Type.JSON):
                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
                return self.sql(expression, "this")

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
            `TEXT` to `VARCHAR`.
            """
            if expression.is_type("text"):
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")

                if not precision:
                    expression.append("expressions", exp.var("MAX"))

            return super().datatype_sql(expression)

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with TABLE PROPERTIES, LOCATION and FILE FORMAT clauses."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" TABLE PROPERTIES ({exprs})" if exprs else ""
            location = self.sql(expression, "location")
            location = f" LOCATION {location}" if location else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" FILE FORMAT {file_format}" if file_format else ""

            return f"SET{exprs}{location}{file_format}"

        def array_sql(self, expression: exp.Array) -> str:
            # ARRAY[...] and ARRAY(...) are distinct constructors in Redshift, so
            # the bracket notation is preserved when the parse recorded it
            if expression.args.get("bracket_notation"):
                return super().array_sql(expression)

            return rename_func("ARRAY")(self, expression)

        def explode_sql(self, expression: exp.Explode) -> str:
            self.unsupported("Unsupported EXPLODE() function")
            return ""

        def _unix_to_time_sql(self, expression: exp.UnixToTime) -> str:
            """Convert a unix timestamp to a TIMESTAMP via epoch + interval arithmetic."""
            scale = expression.args.get("scale")
            this = self.sql(expression.this)

            # Rescale sub-second precision values (millis/micros/etc.) down to seconds
            if scale is not None and scale != exp.UnixToTime.SECONDS and scale.is_int:
                this = f"({this} / POWER(10, {scale.to_py()}))"

            return f"(TIMESTAMP 'epoch' + {this} * INTERVAL '1 SECOND')"
Specifies the strategy according to which identifiers should be normalized.
Whether the ARRAY constructor is context-sensitive, i.e in Redshift ARRAY[1, 2, 3] != ARRAY(1, 2, 3) as the former is of type INT[] vs the latter which is SUPER
Whether COALESCE in comparisons has non-standard NULL semantics.
We can't convert COALESCE(x, 1) = 2 into NOT x IS NULL AND x = 2 for Redshift,
because the two are not always equivalent. For example, if x is NULL and it comes
from a table, then the result is NULL, despite FALSE AND NULL evaluating to FALSE.
In standard SQL and most dialects, these expressions are equivalent, but Redshift treats table NULLs differently in this context.
Whether REGEXP_EXTRACT returns NULL when the position arg exceeds the string length.
Whether Array update functions return NULL when the input array is NULL.
Associates this dialect's time formats with their equivalent Python strftime formats.
Mapping of an escaped sequence (e.g. `\n`) to its unescaped version (e.g. a newline character).
class Parser(Postgres.Parser):
    """Redshift-specific parser, extending the Postgres parser."""

    FUNCTIONS = {
        **Postgres.Parser.FUNCTIONS,
        # ADD_MONTHS(date, n) is parsed like DATEADD(month, n, date); it yields a TIMESTAMP
        "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
            this=seq_get(args, 0),
            expression=seq_get(args, 1),
            unit=exp.var("month"),
            return_type=exp.DataType.build("TIMESTAMP"),
        ),
        # When the source timezone is omitted, Redshift's CONVERT_TIMEZONE assumes UTC
        "CONVERT_TIMEZONE": lambda args: build_convert_timezone(args, "UTC"),
        "DATEADD": _build_date_delta(exp.TsOrDsAdd),
        "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
        "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
        "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
        "GETDATE": exp.CurrentTimestamp.from_arg_list,
        "LISTAGG": exp.GroupConcat.from_arg_list,
        "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
            this=seq_get(args, 0),
            expression=seq_get(args, 1),
            position=seq_get(args, 2),
            occurrence=seq_get(args, 3),
            parameters=seq_get(args, 4),
        ),
        # SPLIT_TO_ARRAY's delimiter argument is optional and defaults to a comma
        "SPLIT_TO_ARRAY": lambda args: exp.StringToArray(
            this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string(",")
        ),
        "STRTOL": exp.FromBase.from_arg_list,
    }
    # Redshift doesn't support Postgres' GET_BIT function
    FUNCTIONS.pop("GET_BIT")

    NO_PAREN_FUNCTION_PARSERS = {
        **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
        "APPROXIMATE": lambda self: self._parse_approximate_count(),
        # SYSDATE maps to CURRENT_TIMESTAMP but is flagged so it round-trips as SYSDATE
        "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
    }

    SUPPORTS_IMPLICIT_UNNEST = True

    def _parse_table(
        self,
        schema: bool = False,
        joins: bool = False,
        alias_tokens: t.Optional[t.Collection[TokenType]] = None,
        parse_bracket: bool = False,
        is_db_reference: bool = False,
        parse_partition: bool = False,
        consume_pipe: bool = False,
    ) -> t.Optional[exp.Expression]:
        # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
        unpivot = self._match(TokenType.UNPIVOT)
        table = super()._parse_table(
            schema=schema,
            joins=joins,
            alias_tokens=alias_tokens,
            parse_bracket=parse_bracket,
            is_db_reference=is_db_reference,
        )

        # Wrap the parsed table in a Pivot node when a leading UNPIVOT keyword was consumed
        return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table

    def _parse_convert(
        self, strict: bool, safe: t.Optional[bool] = None
    ) -> t.Optional[exp.Expression]:
        # Redshift's CONVERT takes the target type first: CONVERT(type, expr)
        to = self._parse_types()
        self._match(TokenType.COMMA)
        this = self._parse_bitwise()
        return self.expression(exp.TryCast, this=this, to=to, safe=safe)

    def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
        """Parse APPROXIMATE COUNT(DISTINCT x) into ApproxDistinct, or rewind and return None."""
        index = self._index - 1
        func = self._parse_function()

        if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
            return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
        # Not the APPROXIMATE COUNT(DISTINCT ...) shape: restore the token position
        self._retreat(index)
        return None

    def _parse_projections(self) -> t.Tuple[t.List[exp.Expression], t.List[exp.Expression]]:
        projections, _ = super()._parse_projections()
        # If the projection list stopped right before EXCLUDE, step back so we can consume it here
        if self._prev and self._prev.text.upper() == "EXCLUDE" and self._curr:
            self._retreat(self._index - 1)

        # EXCLUDE clause always comes at the end of the projection list and applies to it as a whole
        exclude = (
            self._parse_wrapped_csv(self._parse_expression, optional=True)
            if self._match_text_seq("EXCLUDE")
            else []
        )

        if (
            exclude
            and isinstance(expr := projections[-1], exp.Alias)
            and expr.alias.upper() == "EXCLUDE"
        ):
            # The last projection was mis-parsed as `<expr> AS EXCLUDE`; strip the bogus alias
            projections[-1] = expr.this.pop()

        return projections, exclude
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
Inherited Members
- sqlglot.parser.Parser
- Parser
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- TRIGGER_EVENTS
- ALTERABLES
- ALIAS_TOKENS
- COLON_PLACEHOLDER_TOKENS
- ARRAY_CONSTRUCTORS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- ASSIGNMENT
- DISJUNCTION
- EQUALITY
- COMPARISON
- TERM
- FACTOR
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- CAST_COLUMN_OPERATORS
- EXPRESSION_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PIPE_SYNTAX_TRANSFORM_PARSERS
- CONSTRAINT_PARSERS
- ALTER_PARSERS
- ALTER_ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_PARSERS
- QUERY_MODIFIER_TOKENS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- TYPE_CONVERTERS
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- TRIGGER_TIMING
- TRIGGER_DEFERRABLE
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- SCHEMA_BINDING_OPTIONS
- PROCEDURE_OPTIONS
- EXECUTE_AS_OPTIONS
- KEY_CONSTRAINT_OPTIONS
- WINDOW_EXCLUDE_OPTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_PREFIX
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- COPY_INTO_VARLEN_OPTIONS
- IS_JSON_PREDICATE_KIND
- ODBC_DATETIME_LITERALS
- ON_CONDITION_TOKENS
- PRIVILEGE_FOLLOW_TOKENS
- DESCRIBE_STYLES
- SET_ASSIGNMENT_DELIMITERS
- ANALYZE_STYLES
- ANALYZE_EXPRESSION_PARSERS
- PARTITION_KEYWORDS
- AMBIGUOUS_ALIAS_TOKENS
- OPERATION_MODIFIERS
- RECURSIVE_CTE_SEARCH_KIND
- MODIFIABLES
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- IDENTIFY_PIVOT_STRINGS
- LOG_DEFAULTS_TO_LN
- TABLESAMPLE_CSV
- DEFAULT_SAMPLING_METHOD
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_SET_OP
- SET_OP_MODIFIERS
- NO_PAREN_IF_COMMANDS
- COLON_IS_VARIANT_EXTRACT
- VALUES_FOLLOWED_BY_PAREN
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
- OPTIONAL_ALIAS_TOKEN_CTE
- ALTER_RENAME_REQUIRES_COLUMN
- ALTER_TABLE_PARTITIONS
- JOINS_HAVE_EQUAL_PRECEDENCE
- ZONE_AWARE_TIMESTAMP_CONSTRUCTOR
- MAP_KEYS_ARE_ARBITRARY_EXPRESSIONS
- JSON_EXTRACT_REQUIRES_JSON_EXPRESSION
- ADD_JOIN_ON_TRUE
- raise_error
- validate_expression
- reset
- errors
- error_level
- error_message_context
- max_errors
- dialect
- sql
- parse
- parse_into
- check_errors
- expression
- parse_set_operation
- build_cast
class Tokenizer(Postgres.Tokenizer):
    """Redshift-specific tokenizer, extending the Postgres tokenizer."""

    # Redshift doesn't support Postgres-style bit (b'...') or hex (x'...') string literals
    BIT_STRINGS = []
    HEX_STRINGS = []
    # Both a backslash and a doubled quote escape characters inside string literals
    STRING_ESCAPES = ["\\", "'"]

    KEYWORDS = {
        **Postgres.Tokenizer.KEYWORDS,
        "(+)": TokenType.JOIN_MARKER,
        "HLLSKETCH": TokenType.HLLSKETCH,
        "MINUS": TokenType.EXCEPT,
        "SUPER": TokenType.SUPER,
        "TOP": TokenType.TOP,
        "UNLOAD": TokenType.COMMAND,
        "VARBYTE": TokenType.VARBINARY,
        "BINARY VARYING": TokenType.VARBINARY,
    }
    # VALUES is not a keyword in Redshift (it supports `VALUES` only in limited contexts)
    KEYWORDS.pop("VALUES")

    # Redshift allows # to appear as a table identifier prefix
    SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
    SINGLE_TOKENS.pop("#")
Inherited Members
class Generator(Postgres.Generator):
    """Redshift SQL generator, specializing the Postgres generator.

    Overrides Postgres defaults for features Redshift lacks (locking reads, query
    hints, VALUES-as-table, EXPLODE, tablesamples) and for functions/types it
    spells differently (VARBYTE, GETDATE, LISTAGG, SPLIT_TO_ARRAY, ...).
    """

    LOCKING_READS_SUPPORTED = False
    QUERY_HINTS = False
    VALUES_AS_TABLE = False
    TZ_TO_WITH_TIME_ZONE = True
    NVL2_SUPPORTED = True
    LAST_DAY_SUPPORTS_DATE_PART = False
    CAN_IMPLEMENT_ARRAY_ANY = False
    MULTI_ARG_DISTINCT = True
    COPY_PARAMS_ARE_WRAPPED = False
    HEX_FUNC = "TO_HEX"
    PARSE_JSON_NAME = "JSON_PARSE"
    ARRAY_CONCAT_IS_VAR_LEN = False
    SUPPORTS_CONVERT_TIMEZONE = True
    EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
    SUPPORTS_MEDIAN = True
    ALTER_SET_TYPE = "TYPE"
    SUPPORTS_DECODE_CASE = True
    SUPPORTS_BETWEEN_FLAGS = False
    LIMIT_FETCH = "LIMIT"
    STAR_EXCEPT = "EXCLUDE"
    STAR_EXCLUDE_REQUIRES_DERIVED_TABLE = False

    # Redshift doesn't have `WITH` as part of their with_properties so we remove it
    WITH_PROPERTIES_PREFIX = " "

    TYPE_MAPPING = {
        **Postgres.Generator.TYPE_MAPPING,
        exp.DataType.Type.BINARY: "VARBYTE",
        exp.DataType.Type.BLOB: "VARBYTE",
        exp.DataType.Type.INT: "INTEGER",
        # Redshift's TIME/TIMESTAMP carry no timezone; the TZ is handled via TZ_TO_WITH_TIME_ZONE
        exp.DataType.Type.TIMETZ: "TIME",
        exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
        exp.DataType.Type.VARBINARY: "VARBYTE",
        exp.DataType.Type.ROWVERSION: "VARBYTE",
    }

    TRANSFORMS = {
        **Postgres.Generator.TRANSFORMS,
        exp.ArrayConcat: array_concat_sql("ARRAY_CONCAT"),
        exp.Concat: concat_to_dpipe_sql,
        exp.ConcatWs: concat_ws_to_dpipe_sql,
        exp.ApproxDistinct: lambda self,
        e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
        # SYSDATE round-trips as-is; other current-timestamp forms become GETDATE()
        exp.CurrentTimestamp: lambda self, e: (
            "SYSDATE" if e.args.get("sysdate") else "GETDATE()"
        ),
        exp.DateAdd: date_delta_sql("DATEADD"),
        exp.DateDiff: date_delta_sql("DATEDIFF"),
        exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
        exp.DistStyleProperty: lambda self, e: self.naked_property(e),
        exp.Explode: lambda self, e: self.explode_sql(e),
        exp.FarmFingerprint: rename_func("FARMFINGERPRINT64"),
        exp.FromBase: rename_func("STRTOL"),
        exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
        exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
        exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
        exp.GroupConcat: rename_func("LISTAGG"),
        exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
        exp.RegexpExtract: rename_func("REGEXP_SUBSTR"),
        exp.Select: transforms.preprocess(
            [
                transforms.eliminate_window_clause,
                transforms.eliminate_distinct_on,
                transforms.eliminate_semi_and_anti_joins,
                transforms.unqualify_unnest,
                transforms.unnest_generate_date_array_using_recursive_cte,
            ]
        ),
        exp.SortKeyProperty: lambda self,
        e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
        exp.StartsWith: lambda self,
        e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
        exp.StringToArray: rename_func("SPLIT_TO_ARRAY"),
        exp.TableSample: no_tablesample_sql,
        exp.TsOrDsAdd: date_delta_sql("DATEADD"),
        exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
        exp.UnixToTime: lambda self, e: self._unix_to_time_sql(e),
        exp.SHA2Digest: lambda self, e: self.func(
            "SHA2", e.this, e.args.get("length") or exp.Literal.number(256)
        ),
    }

    # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
    TRANSFORMS.pop(exp.Pivot)

    # Postgres doesn't support JSON_PARSE, but Redshift does
    TRANSFORMS.pop(exp.ParseJSON)

    # Redshift supports these functions
    TRANSFORMS.pop(exp.AnyValue)
    TRANSFORMS.pop(exp.LastDay)
    TRANSFORMS.pop(exp.SHA2)

    # Postgres and Redshift have different semantics for Getbit
    TRANSFORMS.pop(exp.Getbit)

    # Postgres does not permit a double precision argument in ROUND; Redshift does
    TRANSFORMS.pop(exp.Round)

    # Reserved words per the Redshift docs; identifiers matching these get quoted.
    # NOTE: "encrypt" previously had a trailing space ("encrypt ") and could never
    # match an identifier, so ENCRYPT was effectively not treated as reserved.
    RESERVED_KEYWORDS = {
        "aes128", "aes256", "all", "allowoverwrite", "analyse", "analyze",
        "and", "any", "array", "as", "asc", "authorization", "az64",
        "backup", "between", "binary", "blanksasnull", "both", "bytedict",
        "bzip2", "case", "cast", "check", "collate", "column", "constraint",
        "create", "credentials", "cross", "current_date", "current_time",
        "current_timestamp", "current_user", "current_user_id", "default",
        "deferrable", "deflate", "defrag", "delta", "delta32k", "desc",
        "disable", "distinct", "do", "else", "emptyasnull", "enable",
        "encode", "encrypt", "encryption", "end", "except", "explicit",
        "false", "for", "foreign", "freeze", "from", "full", "globaldict256",
        "globaldict64k", "grant", "group", "gzip", "having", "identity",
        "ignore", "ilike", "in", "initially", "inner", "intersect",
        "interval", "into", "is", "isnull", "join", "leading", "left",
        "like", "limit", "localtime", "localtimestamp", "lun", "luns",
        "lzo", "lzop", "minus", "mostly16", "mostly32", "mostly8",
        "natural", "new", "not", "notnull", "null", "nulls", "off",
        "offline", "offset", "oid", "old", "on", "only", "open", "or",
        "order", "outer", "overlaps", "parallel", "partition", "percent",
        "permissions", "pivot", "placing", "primary", "raw", "readratio",
        "recover", "references", "rejectlog", "resort", "respect",
        "restore", "right", "select", "session_user", "similar", "snapshot",
        "some", "sysdate", "system", "table", "tag", "tdes", "text255",
        "text32k", "then", "timestamp", "to", "top", "trailing", "true",
        "truncatecolumns", "type", "union", "unique", "unnest", "unpivot",
        "user", "using", "verbose", "wallet", "when", "where", "with",
        "without",
    }

    def unnest_sql(self, expression: exp.Unnest) -> str:
        """Render UNNEST, which Redshift only supports as a single-argument
        construct in FROM/JOIN clauses (emitted as `<expr> AS <alias>`)."""
        args = expression.expressions
        num_args = len(args)

        if num_args != 1:
            self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
            return ""

        if isinstance(expression.find_ancestor(exp.From, exp.Join, exp.Select), exp.Select):
            self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses")
            return ""

        arg = self.sql(seq_get(args, 0))

        alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
        return f"{arg} AS {alias}" if alias else arg

    def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
        """Render a CAST; casts to JSON are dropped since Redshift has no JSON type."""
        if expression.is_type(exp.DataType.Type.JSON):
            # Redshift doesn't support a JSON type, so casting to it is treated as a noop
            return self.sql(expression, "this")

        return super().cast_sql(expression, safe_prefix=safe_prefix)

    def datatype_sql(self, expression: exp.DataType) -> str:
        """
        Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
        VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
        without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
        `TEXT` to `VARCHAR`.
        """
        if expression.is_type("text"):
            expression.set("this", exp.DataType.Type.VARCHAR)
            precision = expression.args.get("expressions")

            if not precision:
                expression.append("expressions", exp.var("MAX"))

        return super().datatype_sql(expression)

    def alterset_sql(self, expression: exp.AlterSet) -> str:
        """Render the SET portion of an ALTER statement: TABLE PROPERTIES, LOCATION and FILE FORMAT."""
        exprs = self.expressions(expression, flat=True)
        exprs = f" TABLE PROPERTIES ({exprs})" if exprs else ""
        location = self.sql(expression, "location")
        location = f" LOCATION {location}" if location else ""
        file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
        file_format = f" FILE FORMAT {file_format}" if file_format else ""

        return f"SET{exprs}{location}{file_format}"

    def array_sql(self, expression: exp.Array) -> str:
        """Render an ARRAY constructor, preserving bracket vs. function-call notation,
        since ARRAY[...] and ARRAY(...) yield different types in Redshift."""
        if expression.args.get("bracket_notation"):
            return super().array_sql(expression)

        return rename_func("ARRAY")(self, expression)

    def explode_sql(self, expression: exp.Explode) -> str:
        """Redshift has no EXPLODE(); report it as unsupported and emit nothing."""
        self.unsupported("Unsupported EXPLODE() function")
        return ""

    def _unix_to_time_sql(self, expression: exp.UnixToTime) -> str:
        """Render a unix epoch value as a timestamp via epoch arithmetic, rescaling
        non-second precisions (e.g. millis) down to seconds when statically known."""
        scale = expression.args.get("scale")
        this = self.sql(expression.this)

        if scale is not None and scale != exp.UnixToTime.SECONDS and scale.is_int:
            this = f"({this} / POWER(10, {scale.to_py()}))"

        return f"(TIMESTAMP 'epoch' + {this} * INTERVAL '1 SECOND')"
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True: Always quote except for special cases. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Render UNNEST, which Redshift only supports as a single-argument
    construct in FROM/JOIN clauses (emitted as `<expr> AS <alias>`)."""
    exprs = expression.expressions

    if len(exprs) != 1:
        self.unsupported(f"Unsupported number of arguments in UNNEST: {len(exprs)}")
        return ""

    ancestor = expression.find_ancestor(exp.From, exp.Join, exp.Select)
    if isinstance(ancestor, exp.Select):
        self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses")
        return ""

    rendered = self.sql(seq_get(exprs, 0))
    alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)

    if not alias:
        return rendered
    return f"{rendered} AS {alias}"
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render a CAST; casts to JSON are dropped since Redshift has no JSON type."""
    if not expression.is_type(exp.DataType.Type.JSON):
        return super().cast_sql(expression, safe_prefix=safe_prefix)

    # Redshift doesn't support a JSON type, so casting to it is treated as a noop
    return self.sql(expression, "this")
def datatype_sql(self, expression: exp.DataType) -> str:
    """
    Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    `TEXT` to `VARCHAR`.
    """
    if expression.is_type("text"):
        expression.set("this", exp.DataType.Type.VARCHAR)

        # Without an explicit precision, default to VARCHAR(MAX)
        if not expression.args.get("expressions"):
            expression.append("expressions", exp.var("MAX"))

    return super().datatype_sql(expression)
Redshift converts the TEXT data type to VARCHAR(255) by default when people more generally mean
VARCHAR of max length which is VARCHAR(max) in Redshift. Therefore if we get a TEXT data type
without precision we convert it to VARCHAR(max) and if it does have precision then we just convert
TEXT to VARCHAR.
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render the SET portion of an ALTER statement: TABLE PROPERTIES, LOCATION and FILE FORMAT."""
    clauses = []

    props = self.expressions(expression, flat=True)
    if props:
        clauses.append(f" TABLE PROPERTIES ({props})")

    location = self.sql(expression, "location")
    if location:
        clauses.append(f" LOCATION {location}")

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        clauses.append(f" FILE FORMAT {file_format}")

    return "SET" + "".join(clauses)
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_ONLY_LITERALS
- GROUPINGS_SEP
- INDEX_ON
- DIRECTED_JOINS
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- EXTRACT_ALLOWS_QUOTES
- ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
- UNNEST_WITH_ORDINALITY
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_KEYWORDS
- TABLESAMPLE_WITH_METHOD
- COLLATE_IS_FUNC
- DATA_TYPE_SPECIFIERS_ALLOWED
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- JSON_KEY_VALUE_PAIR_SEP
- INSERT_OVERWRITE
- SUPPORTS_CREATE_TABLE_LIKE
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- SUPPORTS_TO_NUMBER
- SET_OP_MODIFIERS
- COPY_PARAMS_EQ_REQUIRED
- UNICODE_SUBSTITUTE
- QUOTE_JSON_PATH
- PAD_FILL_PATTERN_IS_REQUIRED
- SUPPORTS_EXPLODING_PROJECTIONS
- SUPPORTS_UNIX_SECONDS
- ALTER_SET_WRAPPED
- NORMALIZE_EXTRACT_DATE_PARTS
- ARRAY_SIZE_NAME
- SUPPORTS_LIKE_QUANTIFIERS
- MATCH_AGAINST_TABLE_PREFIX
- SET_ASSIGNMENT_REQUIRES_VARIABLE_KEYWORD
- DECLARE_DEFAULT_ASSIGNMENT
- UPDATE_STATEMENT_SUPPORTS_FROM
- UNSUPPORTED_TYPES
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS
- SAFE_JSON_PATH_KEY_RE
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- sanitize_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- pseudocolumn_sql
- columnposition_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- inoutcolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- triggerproperties_sql
- triggerreferencing_sql
- triggerevent_sql
- clone_sql
- describe_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- directory_sql
- delete_sql
- drop_sql
- set_operation
- set_operations
- fetch_sql
- limitoptions_sql
- filter_sql
- hint_sql
- indexparameters_sql
- index_sql
- identifier_sql
- hex_sql
- lowerhex_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- with_properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablefromrows_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- groupingsets_sql
- rollup_sql
- rollupindex_sql
- rollupproperty_sql
- cube_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- queryband_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- booland_sql
- boolor_sql
- order_sql
- withfill_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- options_modifier
- for_modifiers
- queryoption_sql
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- subquery_sql
- qualify_sql
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- formatphrase_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- strtotime_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- alterindex_sql
- alterdiststyle_sql
- altersortkey_sql
- alterrename_sql
- renamecolumn_sql
- alter_sql
- altersession_sql
- add_column_sql
- droppartition_sql
- addconstraint_sql
- addpartition_sql
- distinct_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- safedivide_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- is_sql
- like_sql
- ilike_sql
- match_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- sub_sql
- trycast_sql
- jsoncast_sql
- try_sql
- log_sql
- use_sql
- binary
- ceil_floor
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- whens_sql
- merge_sql
- tochar_sql
- tonumber_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- duplicatekeyproperty_sql
- uniquekeyproperty_sql
- distributedbyproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- generateembedding_sql
- mltranslate_sql
- mlforecast_sql
- featuresattime_sql
- vectorsearch_sql
- forin_sql
- refresh_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodatetime_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- struct_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- copyparameter_sql
- credentials_sql
- copy_sql
- semicolon_sql
- datadeletionproperty_sql
- maskingpolicycolumnconstraint_sql
- gapfill_sql
- scope_resolution
- scoperesolution_sql
- parsejson_sql
- rand_sql
- changes_sql
- pad_sql
- summarize_sql
- explodinggenerateseries_sql
- converttimezone_sql
- json_sql
- jsonvalue_sql
- conditionalinsert_sql
- multitableinserts_sql
- oncondition_sql
- jsonextractquote_sql
- jsonexists_sql
- arrayagg_sql
- slice_sql
- apply_sql
- grant_sql
- revoke_sql
- grantprivilege_sql
- grantprincipal_sql
- columns_sql
- overlay_sql
- todouble_sql
- string_sql
- median_sql
- overflowtruncatebehavior_sql
- unixseconds_sql
- arraysize_sql
- attach_sql
- detach_sql
- attachoption_sql
- watermarkcolumnconstraint_sql
- encodeproperty_sql
- includeproperty_sql
- xmlelement_sql
- xmlkeyvalueoption_sql
- partitionbyrangeproperty_sql
- partitionbyrangepropertydynamic_sql
- unpivotcolumns_sql
- analyzesample_sql
- analyzestatistics_sql
- analyzehistogram_sql
- analyzedelete_sql
- analyzelistchainedrows_sql
- analyzevalidate_sql
- analyze_sql
- xmltable_sql
- xmlnamespace_sql
- export_sql
- declare_sql
- declareitem_sql
- recursivewithsearch_sql
- parameterizedagg_sql
- anonymousaggfunc_sql
- combinedaggfunc_sql
- combinedparameterizedagg_sql
- show_sql
- install_sql
- get_put_sql
- translatecharacters_sql
- decodecase_sql
- semanticview_sql
- getextract_sql
- datefromunixdate_sql
- space_sql
- buildproperty_sql
- refreshtriggerproperty_sql
- modelattribute_sql
- directorystage_sql
- uuid_sql
- initcap_sql
- localtime_sql
- localtimestamp_sql
- weekstart_sql
- chr_sql
- block_sql
- storedprocedure_sql
- ifblock_sql
- whileblock_sql
- execute_sql
- executesql_sql
- sqlglot.dialects.postgres.Postgres.Generator
- SINGLE_STRING_INTERVAL
- RENAME_TABLE_WITH_DB
- JOIN_HINTS
- TABLE_HINTS
- PARAMETER_TOKEN
- NAMED_PLACEHOLDER_TOKEN
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_SEED_KEYWORD
- SUPPORTS_SELECT_INTO
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- SUPPORTS_UNLOGGED_TABLES
- LIKE_PROPERTY_INSIDE_SCHEMA
- SUPPORTS_WINDOW_EXCLUDE
- COPY_HAS_INTO_KEYWORD
- ARRAY_SIZE_DIM_REQUIRED
- INOUT_SEPARATOR
- SUPPORTED_JSON_PATH_PARTS
- lateral_sql
- PROPERTIES_LOCATION
- schemacommentproperty_sql
- commentcolumnconstraint_sql
- columndef_sql
- bracket_sql
- matchagainst_sql
- computedcolumnconstraint_sql
- isascii_sql
- ignorenulls_sql
- respectnulls_sql
- currentschema_sql
- interval_sql
- placeholder_sql
- arraycontains_sql