sqlglot.dialects.redshift
1from __future__ import annotations 2 3import typing as t 4 5from sqlglot import exp, transforms 6from sqlglot.dialects.dialect import ( 7 NormalizationStrategy, 8 concat_to_dpipe_sql, 9 concat_ws_to_dpipe_sql, 10 date_delta_sql, 11 generatedasidentitycolumnconstraint_sql, 12 json_extract_segments, 13 no_tablesample_sql, 14 rename_func, 15 map_date_part, 16) 17from sqlglot.dialects.postgres import Postgres 18from sqlglot.helper import seq_get 19from sqlglot.tokens import TokenType 20from sqlglot.parser import build_convert_timezone 21 22if t.TYPE_CHECKING: 23 from sqlglot._typing import E 24 25 26def _build_date_delta(expr_type: t.Type[E]) -> t.Callable[[t.List], E]: 27 def _builder(args: t.List) -> E: 28 expr = expr_type( 29 this=seq_get(args, 2), 30 expression=seq_get(args, 1), 31 unit=map_date_part(seq_get(args, 0)), 32 ) 33 if expr_type is exp.TsOrDsAdd: 34 expr.set("return_type", exp.DataType.build("TIMESTAMP")) 35 36 return expr 37 38 return _builder 39 40 41class Redshift(Postgres): 42 # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html 43 NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE 44 45 SUPPORTS_USER_DEFINED_TYPES = False 46 INDEX_OFFSET = 0 47 COPY_PARAMS_ARE_CSV = False 48 HEX_LOWERCASE = True 49 HAS_DISTINCT_ARRAY_CONSTRUCTORS = True 50 COALESCE_COMPARISON_NON_STANDARD = True 51 REGEXP_EXTRACT_POSITION_OVERFLOW_RETURNS_NULL = False 52 53 # ref: https://docs.aws.amazon.com/redshift/latest/dg/r_FORMAT_strings.html 54 TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'" 55 TIME_MAPPING = {**Postgres.TIME_MAPPING, "MON": "%b", "HH24": "%H", "HH": "%I"} 56 57 class Parser(Postgres.Parser): 58 FUNCTIONS = { 59 **Postgres.Parser.FUNCTIONS, 60 "ADD_MONTHS": lambda args: exp.TsOrDsAdd( 61 this=seq_get(args, 0), 62 expression=seq_get(args, 1), 63 unit=exp.var("month"), 64 return_type=exp.DataType.build("TIMESTAMP"), 65 ), 66 "CONVERT_TIMEZONE": lambda args: build_convert_timezone(args, "UTC"), 67 "DATEADD": _build_date_delta(exp.TsOrDsAdd), 68 
"DATE_ADD": _build_date_delta(exp.TsOrDsAdd), 69 "DATEDIFF": _build_date_delta(exp.TsOrDsDiff), 70 "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff), 71 "GETDATE": exp.CurrentTimestamp.from_arg_list, 72 "LISTAGG": exp.GroupConcat.from_arg_list, 73 "REGEXP_SUBSTR": lambda args: exp.RegexpExtract( 74 this=seq_get(args, 0), 75 expression=seq_get(args, 1), 76 position=seq_get(args, 2), 77 occurrence=seq_get(args, 3), 78 parameters=seq_get(args, 4), 79 ), 80 "SPLIT_TO_ARRAY": lambda args: exp.StringToArray( 81 this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string(",") 82 ), 83 "STRTOL": exp.FromBase.from_arg_list, 84 } 85 FUNCTIONS.pop("GET_BIT") 86 87 NO_PAREN_FUNCTION_PARSERS = { 88 **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS, 89 "APPROXIMATE": lambda self: self._parse_approximate_count(), 90 "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True), 91 } 92 93 SUPPORTS_IMPLICIT_UNNEST = True 94 95 def _parse_table( 96 self, 97 schema: bool = False, 98 joins: bool = False, 99 alias_tokens: t.Optional[t.Collection[TokenType]] = None, 100 parse_bracket: bool = False, 101 is_db_reference: bool = False, 102 parse_partition: bool = False, 103 consume_pipe: bool = False, 104 ) -> t.Optional[exp.Expression]: 105 # Redshift supports UNPIVOTing SUPER objects, e.g. 
`UNPIVOT foo.obj[0] AS val AT attr` 106 unpivot = self._match(TokenType.UNPIVOT) 107 table = super()._parse_table( 108 schema=schema, 109 joins=joins, 110 alias_tokens=alias_tokens, 111 parse_bracket=parse_bracket, 112 is_db_reference=is_db_reference, 113 ) 114 115 return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table 116 117 def _parse_convert( 118 self, strict: bool, safe: t.Optional[bool] = None 119 ) -> t.Optional[exp.Expression]: 120 to = self._parse_types() 121 self._match(TokenType.COMMA) 122 this = self._parse_bitwise() 123 return self.expression(exp.TryCast, this=this, to=to, safe=safe) 124 125 def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]: 126 index = self._index - 1 127 func = self._parse_function() 128 129 if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct): 130 return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0)) 131 self._retreat(index) 132 return None 133 134 class Tokenizer(Postgres.Tokenizer): 135 BIT_STRINGS = [] 136 HEX_STRINGS = [] 137 STRING_ESCAPES = ["\\", "'"] 138 139 KEYWORDS = { 140 **Postgres.Tokenizer.KEYWORDS, 141 "(+)": TokenType.JOIN_MARKER, 142 "HLLSKETCH": TokenType.HLLSKETCH, 143 "MINUS": TokenType.EXCEPT, 144 "SUPER": TokenType.SUPER, 145 "TOP": TokenType.TOP, 146 "UNLOAD": TokenType.COMMAND, 147 "VARBYTE": TokenType.VARBINARY, 148 "BINARY VARYING": TokenType.VARBINARY, 149 } 150 KEYWORDS.pop("VALUES") 151 152 # Redshift allows # to appear as a table identifier prefix 153 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy() 154 SINGLE_TOKENS.pop("#") 155 156 class Generator(Postgres.Generator): 157 LOCKING_READS_SUPPORTED = False 158 QUERY_HINTS = False 159 VALUES_AS_TABLE = False 160 TZ_TO_WITH_TIME_ZONE = True 161 NVL2_SUPPORTED = True 162 LAST_DAY_SUPPORTS_DATE_PART = False 163 CAN_IMPLEMENT_ARRAY_ANY = False 164 MULTI_ARG_DISTINCT = True 165 COPY_PARAMS_ARE_WRAPPED = False 166 HEX_FUNC = "TO_HEX" 167 PARSE_JSON_NAME = 
"JSON_PARSE" 168 ARRAY_CONCAT_IS_VAR_LEN = False 169 SUPPORTS_CONVERT_TIMEZONE = True 170 EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False 171 SUPPORTS_MEDIAN = True 172 ALTER_SET_TYPE = "TYPE" 173 SUPPORTS_DECODE_CASE = True 174 SUPPORTS_BETWEEN_FLAGS = False 175 LIMIT_FETCH = "LIMIT" 176 177 # Redshift doesn't have `WITH` as part of their with_properties so we remove it 178 WITH_PROPERTIES_PREFIX = " " 179 180 TYPE_MAPPING = { 181 **Postgres.Generator.TYPE_MAPPING, 182 exp.DataType.Type.BINARY: "VARBYTE", 183 exp.DataType.Type.BLOB: "VARBYTE", 184 exp.DataType.Type.INT: "INTEGER", 185 exp.DataType.Type.TIMETZ: "TIME", 186 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 187 exp.DataType.Type.VARBINARY: "VARBYTE", 188 exp.DataType.Type.ROWVERSION: "VARBYTE", 189 } 190 191 TRANSFORMS = { 192 **Postgres.Generator.TRANSFORMS, 193 exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CONCAT"), 194 exp.Concat: concat_to_dpipe_sql, 195 exp.ConcatWs: concat_ws_to_dpipe_sql, 196 exp.ApproxDistinct: lambda self, 197 e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})", 198 exp.CurrentTimestamp: lambda self, e: ( 199 "SYSDATE" if e.args.get("sysdate") else "GETDATE()" 200 ), 201 exp.DateAdd: date_delta_sql("DATEADD"), 202 exp.DateDiff: date_delta_sql("DATEDIFF"), 203 exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this), 204 exp.DistStyleProperty: lambda self, e: self.naked_property(e), 205 exp.Explode: lambda self, e: self.explode_sql(e), 206 exp.FarmFingerprint: rename_func("FARMFINGERPRINT64"), 207 exp.FromBase: rename_func("STRTOL"), 208 exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql, 209 exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"), 210 exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"), 211 exp.GroupConcat: rename_func("LISTAGG"), 212 exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))), 213 exp.RegexpExtract: rename_func("REGEXP_SUBSTR"), 214 
exp.Select: transforms.preprocess( 215 [ 216 transforms.eliminate_window_clause, 217 transforms.eliminate_distinct_on, 218 transforms.eliminate_semi_and_anti_joins, 219 transforms.unqualify_unnest, 220 transforms.unnest_generate_date_array_using_recursive_cte, 221 ] 222 ), 223 exp.SortKeyProperty: lambda self, 224 e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", 225 exp.StartsWith: lambda self, 226 e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'", 227 exp.StringToArray: rename_func("SPLIT_TO_ARRAY"), 228 exp.TableSample: no_tablesample_sql, 229 exp.TsOrDsAdd: date_delta_sql("DATEADD"), 230 exp.TsOrDsDiff: date_delta_sql("DATEDIFF"), 231 exp.UnixToTime: lambda self, e: self._unix_to_time_sql(e), 232 exp.SHA2Digest: lambda self, e: self.func( 233 "SHA2", e.this, e.args.get("length") or exp.Literal.number(256) 234 ), 235 } 236 237 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots 238 TRANSFORMS.pop(exp.Pivot) 239 240 # Postgres doesn't support JSON_PARSE, but Redshift does 241 TRANSFORMS.pop(exp.ParseJSON) 242 243 # Redshift supports these functions 244 TRANSFORMS.pop(exp.AnyValue) 245 TRANSFORMS.pop(exp.LastDay) 246 TRANSFORMS.pop(exp.SHA2) 247 248 # Postgres and Redshift have different semantics for Getbit 249 TRANSFORMS.pop(exp.Getbit) 250 251 # Postgres does not permit a double precision argument in ROUND; Redshift does 252 TRANSFORMS.pop(exp.Round) 253 254 RESERVED_KEYWORDS = { 255 "aes128", 256 "aes256", 257 "all", 258 "allowoverwrite", 259 "analyse", 260 "analyze", 261 "and", 262 "any", 263 "array", 264 "as", 265 "asc", 266 "authorization", 267 "az64", 268 "backup", 269 "between", 270 "binary", 271 "blanksasnull", 272 "both", 273 "bytedict", 274 "bzip2", 275 "case", 276 "cast", 277 "check", 278 "collate", 279 "column", 280 "constraint", 281 "create", 282 "credentials", 283 "cross", 284 "current_date", 285 "current_time", 286 "current_timestamp", 287 "current_user", 288 "current_user_id", 
289 "default", 290 "deferrable", 291 "deflate", 292 "defrag", 293 "delta", 294 "delta32k", 295 "desc", 296 "disable", 297 "distinct", 298 "do", 299 "else", 300 "emptyasnull", 301 "enable", 302 "encode", 303 "encrypt ", 304 "encryption", 305 "end", 306 "except", 307 "explicit", 308 "false", 309 "for", 310 "foreign", 311 "freeze", 312 "from", 313 "full", 314 "globaldict256", 315 "globaldict64k", 316 "grant", 317 "group", 318 "gzip", 319 "having", 320 "identity", 321 "ignore", 322 "ilike", 323 "in", 324 "initially", 325 "inner", 326 "intersect", 327 "interval", 328 "into", 329 "is", 330 "isnull", 331 "join", 332 "leading", 333 "left", 334 "like", 335 "limit", 336 "localtime", 337 "localtimestamp", 338 "lun", 339 "luns", 340 "lzo", 341 "lzop", 342 "minus", 343 "mostly16", 344 "mostly32", 345 "mostly8", 346 "natural", 347 "new", 348 "not", 349 "notnull", 350 "null", 351 "nulls", 352 "off", 353 "offline", 354 "offset", 355 "oid", 356 "old", 357 "on", 358 "only", 359 "open", 360 "or", 361 "order", 362 "outer", 363 "overlaps", 364 "parallel", 365 "partition", 366 "percent", 367 "permissions", 368 "pivot", 369 "placing", 370 "primary", 371 "raw", 372 "readratio", 373 "recover", 374 "references", 375 "rejectlog", 376 "resort", 377 "respect", 378 "restore", 379 "right", 380 "select", 381 "session_user", 382 "similar", 383 "snapshot", 384 "some", 385 "sysdate", 386 "system", 387 "table", 388 "tag", 389 "tdes", 390 "text255", 391 "text32k", 392 "then", 393 "timestamp", 394 "to", 395 "top", 396 "trailing", 397 "true", 398 "truncatecolumns", 399 "type", 400 "union", 401 "unique", 402 "unnest", 403 "unpivot", 404 "user", 405 "using", 406 "verbose", 407 "wallet", 408 "when", 409 "where", 410 "with", 411 "without", 412 } 413 414 def unnest_sql(self, expression: exp.Unnest) -> str: 415 args = expression.expressions 416 num_args = len(args) 417 418 if num_args != 1: 419 self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}") 420 return "" 421 422 if 
isinstance(expression.find_ancestor(exp.From, exp.Join, exp.Select), exp.Select): 423 self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses") 424 return "" 425 426 arg = self.sql(seq_get(args, 0)) 427 428 alias = self.expressions(expression.args.get("alias"), key="columns", flat=True) 429 return f"{arg} AS {alias}" if alias else arg 430 431 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: 432 if expression.is_type(exp.DataType.Type.JSON): 433 # Redshift doesn't support a JSON type, so casting to it is treated as a noop 434 return self.sql(expression, "this") 435 436 return super().cast_sql(expression, safe_prefix=safe_prefix) 437 438 def datatype_sql(self, expression: exp.DataType) -> str: 439 """ 440 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean 441 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type 442 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert 443 `TEXT` to `VARCHAR`. 
444 """ 445 if expression.is_type("text"): 446 expression.set("this", exp.DataType.Type.VARCHAR) 447 precision = expression.args.get("expressions") 448 449 if not precision: 450 expression.append("expressions", exp.var("MAX")) 451 452 return super().datatype_sql(expression) 453 454 def alterset_sql(self, expression: exp.AlterSet) -> str: 455 exprs = self.expressions(expression, flat=True) 456 exprs = f" TABLE PROPERTIES ({exprs})" if exprs else "" 457 location = self.sql(expression, "location") 458 location = f" LOCATION {location}" if location else "" 459 file_format = self.expressions(expression, key="file_format", flat=True, sep=" ") 460 file_format = f" FILE FORMAT {file_format}" if file_format else "" 461 462 return f"SET{exprs}{location}{file_format}" 463 464 def array_sql(self, expression: exp.Array) -> str: 465 if expression.args.get("bracket_notation"): 466 return super().array_sql(expression) 467 468 return rename_func("ARRAY")(self, expression) 469 470 def explode_sql(self, expression: exp.Explode) -> str: 471 self.unsupported("Unsupported EXPLODE() function") 472 return "" 473 474 def _unix_to_time_sql(self, expression: exp.UnixToTime) -> str: 475 scale = expression.args.get("scale") 476 this = self.sql(expression.this) 477 478 if scale is not None and scale != exp.UnixToTime.SECONDS and scale.is_int: 479 this = f"({this} / POWER(10, {scale.to_py()}))" 480 481 return f"(TIMESTAMP 'epoch' + {this} * INTERVAL '1 SECOND')"
42class Redshift(Postgres): 43 # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html 44 NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE 45 46 SUPPORTS_USER_DEFINED_TYPES = False 47 INDEX_OFFSET = 0 48 COPY_PARAMS_ARE_CSV = False 49 HEX_LOWERCASE = True 50 HAS_DISTINCT_ARRAY_CONSTRUCTORS = True 51 COALESCE_COMPARISON_NON_STANDARD = True 52 REGEXP_EXTRACT_POSITION_OVERFLOW_RETURNS_NULL = False 53 54 # ref: https://docs.aws.amazon.com/redshift/latest/dg/r_FORMAT_strings.html 55 TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'" 56 TIME_MAPPING = {**Postgres.TIME_MAPPING, "MON": "%b", "HH24": "%H", "HH": "%I"} 57 58 class Parser(Postgres.Parser): 59 FUNCTIONS = { 60 **Postgres.Parser.FUNCTIONS, 61 "ADD_MONTHS": lambda args: exp.TsOrDsAdd( 62 this=seq_get(args, 0), 63 expression=seq_get(args, 1), 64 unit=exp.var("month"), 65 return_type=exp.DataType.build("TIMESTAMP"), 66 ), 67 "CONVERT_TIMEZONE": lambda args: build_convert_timezone(args, "UTC"), 68 "DATEADD": _build_date_delta(exp.TsOrDsAdd), 69 "DATE_ADD": _build_date_delta(exp.TsOrDsAdd), 70 "DATEDIFF": _build_date_delta(exp.TsOrDsDiff), 71 "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff), 72 "GETDATE": exp.CurrentTimestamp.from_arg_list, 73 "LISTAGG": exp.GroupConcat.from_arg_list, 74 "REGEXP_SUBSTR": lambda args: exp.RegexpExtract( 75 this=seq_get(args, 0), 76 expression=seq_get(args, 1), 77 position=seq_get(args, 2), 78 occurrence=seq_get(args, 3), 79 parameters=seq_get(args, 4), 80 ), 81 "SPLIT_TO_ARRAY": lambda args: exp.StringToArray( 82 this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string(",") 83 ), 84 "STRTOL": exp.FromBase.from_arg_list, 85 } 86 FUNCTIONS.pop("GET_BIT") 87 88 NO_PAREN_FUNCTION_PARSERS = { 89 **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS, 90 "APPROXIMATE": lambda self: self._parse_approximate_count(), 91 "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True), 92 } 93 94 SUPPORTS_IMPLICIT_UNNEST = True 95 96 def _parse_table( 97 self, 98 
schema: bool = False, 99 joins: bool = False, 100 alias_tokens: t.Optional[t.Collection[TokenType]] = None, 101 parse_bracket: bool = False, 102 is_db_reference: bool = False, 103 parse_partition: bool = False, 104 consume_pipe: bool = False, 105 ) -> t.Optional[exp.Expression]: 106 # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr` 107 unpivot = self._match(TokenType.UNPIVOT) 108 table = super()._parse_table( 109 schema=schema, 110 joins=joins, 111 alias_tokens=alias_tokens, 112 parse_bracket=parse_bracket, 113 is_db_reference=is_db_reference, 114 ) 115 116 return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table 117 118 def _parse_convert( 119 self, strict: bool, safe: t.Optional[bool] = None 120 ) -> t.Optional[exp.Expression]: 121 to = self._parse_types() 122 self._match(TokenType.COMMA) 123 this = self._parse_bitwise() 124 return self.expression(exp.TryCast, this=this, to=to, safe=safe) 125 126 def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]: 127 index = self._index - 1 128 func = self._parse_function() 129 130 if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct): 131 return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0)) 132 self._retreat(index) 133 return None 134 135 class Tokenizer(Postgres.Tokenizer): 136 BIT_STRINGS = [] 137 HEX_STRINGS = [] 138 STRING_ESCAPES = ["\\", "'"] 139 140 KEYWORDS = { 141 **Postgres.Tokenizer.KEYWORDS, 142 "(+)": TokenType.JOIN_MARKER, 143 "HLLSKETCH": TokenType.HLLSKETCH, 144 "MINUS": TokenType.EXCEPT, 145 "SUPER": TokenType.SUPER, 146 "TOP": TokenType.TOP, 147 "UNLOAD": TokenType.COMMAND, 148 "VARBYTE": TokenType.VARBINARY, 149 "BINARY VARYING": TokenType.VARBINARY, 150 } 151 KEYWORDS.pop("VALUES") 152 153 # Redshift allows # to appear as a table identifier prefix 154 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy() 155 SINGLE_TOKENS.pop("#") 156 157 class Generator(Postgres.Generator): 158 
LOCKING_READS_SUPPORTED = False 159 QUERY_HINTS = False 160 VALUES_AS_TABLE = False 161 TZ_TO_WITH_TIME_ZONE = True 162 NVL2_SUPPORTED = True 163 LAST_DAY_SUPPORTS_DATE_PART = False 164 CAN_IMPLEMENT_ARRAY_ANY = False 165 MULTI_ARG_DISTINCT = True 166 COPY_PARAMS_ARE_WRAPPED = False 167 HEX_FUNC = "TO_HEX" 168 PARSE_JSON_NAME = "JSON_PARSE" 169 ARRAY_CONCAT_IS_VAR_LEN = False 170 SUPPORTS_CONVERT_TIMEZONE = True 171 EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False 172 SUPPORTS_MEDIAN = True 173 ALTER_SET_TYPE = "TYPE" 174 SUPPORTS_DECODE_CASE = True 175 SUPPORTS_BETWEEN_FLAGS = False 176 LIMIT_FETCH = "LIMIT" 177 178 # Redshift doesn't have `WITH` as part of their with_properties so we remove it 179 WITH_PROPERTIES_PREFIX = " " 180 181 TYPE_MAPPING = { 182 **Postgres.Generator.TYPE_MAPPING, 183 exp.DataType.Type.BINARY: "VARBYTE", 184 exp.DataType.Type.BLOB: "VARBYTE", 185 exp.DataType.Type.INT: "INTEGER", 186 exp.DataType.Type.TIMETZ: "TIME", 187 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 188 exp.DataType.Type.VARBINARY: "VARBYTE", 189 exp.DataType.Type.ROWVERSION: "VARBYTE", 190 } 191 192 TRANSFORMS = { 193 **Postgres.Generator.TRANSFORMS, 194 exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CONCAT"), 195 exp.Concat: concat_to_dpipe_sql, 196 exp.ConcatWs: concat_ws_to_dpipe_sql, 197 exp.ApproxDistinct: lambda self, 198 e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})", 199 exp.CurrentTimestamp: lambda self, e: ( 200 "SYSDATE" if e.args.get("sysdate") else "GETDATE()" 201 ), 202 exp.DateAdd: date_delta_sql("DATEADD"), 203 exp.DateDiff: date_delta_sql("DATEDIFF"), 204 exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this), 205 exp.DistStyleProperty: lambda self, e: self.naked_property(e), 206 exp.Explode: lambda self, e: self.explode_sql(e), 207 exp.FarmFingerprint: rename_func("FARMFINGERPRINT64"), 208 exp.FromBase: rename_func("STRTOL"), 209 exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql, 210 
exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"), 211 exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"), 212 exp.GroupConcat: rename_func("LISTAGG"), 213 exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))), 214 exp.RegexpExtract: rename_func("REGEXP_SUBSTR"), 215 exp.Select: transforms.preprocess( 216 [ 217 transforms.eliminate_window_clause, 218 transforms.eliminate_distinct_on, 219 transforms.eliminate_semi_and_anti_joins, 220 transforms.unqualify_unnest, 221 transforms.unnest_generate_date_array_using_recursive_cte, 222 ] 223 ), 224 exp.SortKeyProperty: lambda self, 225 e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", 226 exp.StartsWith: lambda self, 227 e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'", 228 exp.StringToArray: rename_func("SPLIT_TO_ARRAY"), 229 exp.TableSample: no_tablesample_sql, 230 exp.TsOrDsAdd: date_delta_sql("DATEADD"), 231 exp.TsOrDsDiff: date_delta_sql("DATEDIFF"), 232 exp.UnixToTime: lambda self, e: self._unix_to_time_sql(e), 233 exp.SHA2Digest: lambda self, e: self.func( 234 "SHA2", e.this, e.args.get("length") or exp.Literal.number(256) 235 ), 236 } 237 238 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots 239 TRANSFORMS.pop(exp.Pivot) 240 241 # Postgres doesn't support JSON_PARSE, but Redshift does 242 TRANSFORMS.pop(exp.ParseJSON) 243 244 # Redshift supports these functions 245 TRANSFORMS.pop(exp.AnyValue) 246 TRANSFORMS.pop(exp.LastDay) 247 TRANSFORMS.pop(exp.SHA2) 248 249 # Postgres and Redshift have different semantics for Getbit 250 TRANSFORMS.pop(exp.Getbit) 251 252 # Postgres does not permit a double precision argument in ROUND; Redshift does 253 TRANSFORMS.pop(exp.Round) 254 255 RESERVED_KEYWORDS = { 256 "aes128", 257 "aes256", 258 "all", 259 "allowoverwrite", 260 "analyse", 261 "analyze", 262 "and", 263 "any", 264 "array", 265 "as", 266 "asc", 267 "authorization", 268 "az64", 269 
"backup", 270 "between", 271 "binary", 272 "blanksasnull", 273 "both", 274 "bytedict", 275 "bzip2", 276 "case", 277 "cast", 278 "check", 279 "collate", 280 "column", 281 "constraint", 282 "create", 283 "credentials", 284 "cross", 285 "current_date", 286 "current_time", 287 "current_timestamp", 288 "current_user", 289 "current_user_id", 290 "default", 291 "deferrable", 292 "deflate", 293 "defrag", 294 "delta", 295 "delta32k", 296 "desc", 297 "disable", 298 "distinct", 299 "do", 300 "else", 301 "emptyasnull", 302 "enable", 303 "encode", 304 "encrypt ", 305 "encryption", 306 "end", 307 "except", 308 "explicit", 309 "false", 310 "for", 311 "foreign", 312 "freeze", 313 "from", 314 "full", 315 "globaldict256", 316 "globaldict64k", 317 "grant", 318 "group", 319 "gzip", 320 "having", 321 "identity", 322 "ignore", 323 "ilike", 324 "in", 325 "initially", 326 "inner", 327 "intersect", 328 "interval", 329 "into", 330 "is", 331 "isnull", 332 "join", 333 "leading", 334 "left", 335 "like", 336 "limit", 337 "localtime", 338 "localtimestamp", 339 "lun", 340 "luns", 341 "lzo", 342 "lzop", 343 "minus", 344 "mostly16", 345 "mostly32", 346 "mostly8", 347 "natural", 348 "new", 349 "not", 350 "notnull", 351 "null", 352 "nulls", 353 "off", 354 "offline", 355 "offset", 356 "oid", 357 "old", 358 "on", 359 "only", 360 "open", 361 "or", 362 "order", 363 "outer", 364 "overlaps", 365 "parallel", 366 "partition", 367 "percent", 368 "permissions", 369 "pivot", 370 "placing", 371 "primary", 372 "raw", 373 "readratio", 374 "recover", 375 "references", 376 "rejectlog", 377 "resort", 378 "respect", 379 "restore", 380 "right", 381 "select", 382 "session_user", 383 "similar", 384 "snapshot", 385 "some", 386 "sysdate", 387 "system", 388 "table", 389 "tag", 390 "tdes", 391 "text255", 392 "text32k", 393 "then", 394 "timestamp", 395 "to", 396 "top", 397 "trailing", 398 "true", 399 "truncatecolumns", 400 "type", 401 "union", 402 "unique", 403 "unnest", 404 "unpivot", 405 "user", 406 "using", 407 "verbose", 
408 "wallet", 409 "when", 410 "where", 411 "with", 412 "without", 413 } 414 415 def unnest_sql(self, expression: exp.Unnest) -> str: 416 args = expression.expressions 417 num_args = len(args) 418 419 if num_args != 1: 420 self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}") 421 return "" 422 423 if isinstance(expression.find_ancestor(exp.From, exp.Join, exp.Select), exp.Select): 424 self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses") 425 return "" 426 427 arg = self.sql(seq_get(args, 0)) 428 429 alias = self.expressions(expression.args.get("alias"), key="columns", flat=True) 430 return f"{arg} AS {alias}" if alias else arg 431 432 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: 433 if expression.is_type(exp.DataType.Type.JSON): 434 # Redshift doesn't support a JSON type, so casting to it is treated as a noop 435 return self.sql(expression, "this") 436 437 return super().cast_sql(expression, safe_prefix=safe_prefix) 438 439 def datatype_sql(self, expression: exp.DataType) -> str: 440 """ 441 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean 442 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type 443 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert 444 `TEXT` to `VARCHAR`. 
445 """ 446 if expression.is_type("text"): 447 expression.set("this", exp.DataType.Type.VARCHAR) 448 precision = expression.args.get("expressions") 449 450 if not precision: 451 expression.append("expressions", exp.var("MAX")) 452 453 return super().datatype_sql(expression) 454 455 def alterset_sql(self, expression: exp.AlterSet) -> str: 456 exprs = self.expressions(expression, flat=True) 457 exprs = f" TABLE PROPERTIES ({exprs})" if exprs else "" 458 location = self.sql(expression, "location") 459 location = f" LOCATION {location}" if location else "" 460 file_format = self.expressions(expression, key="file_format", flat=True, sep=" ") 461 file_format = f" FILE FORMAT {file_format}" if file_format else "" 462 463 return f"SET{exprs}{location}{file_format}" 464 465 def array_sql(self, expression: exp.Array) -> str: 466 if expression.args.get("bracket_notation"): 467 return super().array_sql(expression) 468 469 return rename_func("ARRAY")(self, expression) 470 471 def explode_sql(self, expression: exp.Explode) -> str: 472 self.unsupported("Unsupported EXPLODE() function") 473 return "" 474 475 def _unix_to_time_sql(self, expression: exp.UnixToTime) -> str: 476 scale = expression.args.get("scale") 477 this = self.sql(expression.this) 478 479 if scale is not None and scale != exp.UnixToTime.SECONDS and scale.is_int: 480 this = f"({this} / POWER(10, {scale.to_py()}))" 481 482 return f"(TIMESTAMP 'epoch' + {this} * INTERVAL '1 SECOND')"
Specifies the strategy according to which identifiers should be normalized.
Whether the ARRAY constructor is context-sensitive, i.e in Redshift ARRAY[1, 2, 3] != ARRAY(1, 2, 3) as the former is of type INT[] vs the latter which is SUPER
Whether COALESCE in comparisons has non-standard NULL semantics.
We can't convert COALESCE(x, 1) = 2 into NOT x IS NULL AND x = 2 for redshift,
because they are not always equivalent. For example, if x is NULL and it comes
from a table, then the result is NULL, despite FALSE AND NULL evaluating to FALSE.
In standard SQL and most dialects, these expressions are equivalent, but Redshift treats table NULLs differently in this context.
Whether REGEXP_EXTRACT returns NULL when the position arg exceeds the string length.
Associates this dialect's time formats with their equivalent Python strftime formats.
Mapping of an escaped sequence (e.g. `\n`) to its unescaped version (e.g. a newline character).
58 class Parser(Postgres.Parser): 59 FUNCTIONS = { 60 **Postgres.Parser.FUNCTIONS, 61 "ADD_MONTHS": lambda args: exp.TsOrDsAdd( 62 this=seq_get(args, 0), 63 expression=seq_get(args, 1), 64 unit=exp.var("month"), 65 return_type=exp.DataType.build("TIMESTAMP"), 66 ), 67 "CONVERT_TIMEZONE": lambda args: build_convert_timezone(args, "UTC"), 68 "DATEADD": _build_date_delta(exp.TsOrDsAdd), 69 "DATE_ADD": _build_date_delta(exp.TsOrDsAdd), 70 "DATEDIFF": _build_date_delta(exp.TsOrDsDiff), 71 "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff), 72 "GETDATE": exp.CurrentTimestamp.from_arg_list, 73 "LISTAGG": exp.GroupConcat.from_arg_list, 74 "REGEXP_SUBSTR": lambda args: exp.RegexpExtract( 75 this=seq_get(args, 0), 76 expression=seq_get(args, 1), 77 position=seq_get(args, 2), 78 occurrence=seq_get(args, 3), 79 parameters=seq_get(args, 4), 80 ), 81 "SPLIT_TO_ARRAY": lambda args: exp.StringToArray( 82 this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string(",") 83 ), 84 "STRTOL": exp.FromBase.from_arg_list, 85 } 86 FUNCTIONS.pop("GET_BIT") 87 88 NO_PAREN_FUNCTION_PARSERS = { 89 **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS, 90 "APPROXIMATE": lambda self: self._parse_approximate_count(), 91 "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True), 92 } 93 94 SUPPORTS_IMPLICIT_UNNEST = True 95 96 def _parse_table( 97 self, 98 schema: bool = False, 99 joins: bool = False, 100 alias_tokens: t.Optional[t.Collection[TokenType]] = None, 101 parse_bracket: bool = False, 102 is_db_reference: bool = False, 103 parse_partition: bool = False, 104 consume_pipe: bool = False, 105 ) -> t.Optional[exp.Expression]: 106 # Redshift supports UNPIVOTing SUPER objects, e.g. 
`UNPIVOT foo.obj[0] AS val AT attr` 107 unpivot = self._match(TokenType.UNPIVOT) 108 table = super()._parse_table( 109 schema=schema, 110 joins=joins, 111 alias_tokens=alias_tokens, 112 parse_bracket=parse_bracket, 113 is_db_reference=is_db_reference, 114 ) 115 116 return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table 117 118 def _parse_convert( 119 self, strict: bool, safe: t.Optional[bool] = None 120 ) -> t.Optional[exp.Expression]: 121 to = self._parse_types() 122 self._match(TokenType.COMMA) 123 this = self._parse_bitwise() 124 return self.expression(exp.TryCast, this=this, to=to, safe=safe) 125 126 def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]: 127 index = self._index - 1 128 func = self._parse_function() 129 130 if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct): 131 return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0)) 132 self._retreat(index) 133 return None
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
Inherited Members
- sqlglot.parser.Parser
- Parser
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- ALTERABLES
- ALIAS_TOKENS
- COLON_PLACEHOLDER_TOKENS
- ARRAY_CONSTRUCTORS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- ASSIGNMENT
- DISJUNCTION
- EQUALITY
- COMPARISON
- TERM
- FACTOR
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- CAST_COLUMN_OPERATORS
- EXPRESSION_PARSERS
- UNARY_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PIPE_SYNTAX_TRANSFORM_PARSERS
- CONSTRAINT_PARSERS
- ALTER_PARSERS
- ALTER_ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_PARSERS
- QUERY_MODIFIER_TOKENS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- TYPE_CONVERTERS
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- SCHEMA_BINDING_OPTIONS
- PROCEDURE_OPTIONS
- EXECUTE_AS_OPTIONS
- KEY_CONSTRAINT_OPTIONS
- WINDOW_EXCLUDE_OPTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_PREFIX
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- COPY_INTO_VARLEN_OPTIONS
- IS_JSON_PREDICATE_KIND
- ODBC_DATETIME_LITERALS
- ON_CONDITION_TOKENS
- PRIVILEGE_FOLLOW_TOKENS
- DESCRIBE_STYLES
- SET_ASSIGNMENT_DELIMITERS
- ANALYZE_STYLES
- ANALYZE_EXPRESSION_PARSERS
- PARTITION_KEYWORDS
- AMBIGUOUS_ALIAS_TOKENS
- OPERATION_MODIFIERS
- RECURSIVE_CTE_SEARCH_KIND
- MODIFIABLES
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- IDENTIFY_PIVOT_STRINGS
- LOG_DEFAULTS_TO_LN
- TABLESAMPLE_CSV
- DEFAULT_SAMPLING_METHOD
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_SET_OP
- SET_OP_MODIFIERS
- NO_PAREN_IF_COMMANDS
- COLON_IS_VARIANT_EXTRACT
- VALUES_FOLLOWED_BY_PAREN
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
- OPTIONAL_ALIAS_TOKEN_CTE
- ALTER_RENAME_REQUIRES_COLUMN
- ALTER_TABLE_PARTITIONS
- JOINS_HAVE_EQUAL_PRECEDENCE
- ZONE_AWARE_TIMESTAMP_CONSTRUCTOR
- MAP_KEYS_ARE_ARBITRARY_EXPRESSIONS
- JSON_EXTRACT_REQUIRES_JSON_EXPRESSION
- ADD_JOIN_ON_TRUE
- error_level
- error_message_context
- max_errors
- dialect
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- parse_set_operation
- build_cast
- errors
- sql
135 class Tokenizer(Postgres.Tokenizer): 136 BIT_STRINGS = [] 137 HEX_STRINGS = [] 138 STRING_ESCAPES = ["\\", "'"] 139 140 KEYWORDS = { 141 **Postgres.Tokenizer.KEYWORDS, 142 "(+)": TokenType.JOIN_MARKER, 143 "HLLSKETCH": TokenType.HLLSKETCH, 144 "MINUS": TokenType.EXCEPT, 145 "SUPER": TokenType.SUPER, 146 "TOP": TokenType.TOP, 147 "UNLOAD": TokenType.COMMAND, 148 "VARBYTE": TokenType.VARBINARY, 149 "BINARY VARYING": TokenType.VARBINARY, 150 } 151 KEYWORDS.pop("VALUES") 152 153 # Redshift allows # to appear as a table identifier prefix 154 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy() 155 SINGLE_TOKENS.pop("#")
Inherited Members
- sqlglot.tokens.Tokenizer
- Tokenizer
- RAW_STRINGS
- UNICODE_STRINGS
- IDENTIFIERS
- QUOTES
- ESCAPE_FOLLOW_CHARS
- IDENTIFIER_ESCAPES
- STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS
- NESTED_COMMENTS
- HINT_START
- TOKENS_PRECEDING_HINT
- WHITE_SPACE
- COMMANDS
- COMMAND_PREFIX_TOKENS
- NUMERIC_LITERALS
- COMMENTS
- dialect
- use_rs_tokenizer
- reset
- tokenize
- tokenize_rs
- size
- sql
- tokens
157 class Generator(Postgres.Generator): 158 LOCKING_READS_SUPPORTED = False 159 QUERY_HINTS = False 160 VALUES_AS_TABLE = False 161 TZ_TO_WITH_TIME_ZONE = True 162 NVL2_SUPPORTED = True 163 LAST_DAY_SUPPORTS_DATE_PART = False 164 CAN_IMPLEMENT_ARRAY_ANY = False 165 MULTI_ARG_DISTINCT = True 166 COPY_PARAMS_ARE_WRAPPED = False 167 HEX_FUNC = "TO_HEX" 168 PARSE_JSON_NAME = "JSON_PARSE" 169 ARRAY_CONCAT_IS_VAR_LEN = False 170 SUPPORTS_CONVERT_TIMEZONE = True 171 EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False 172 SUPPORTS_MEDIAN = True 173 ALTER_SET_TYPE = "TYPE" 174 SUPPORTS_DECODE_CASE = True 175 SUPPORTS_BETWEEN_FLAGS = False 176 LIMIT_FETCH = "LIMIT" 177 178 # Redshift doesn't have `WITH` as part of their with_properties so we remove it 179 WITH_PROPERTIES_PREFIX = " " 180 181 TYPE_MAPPING = { 182 **Postgres.Generator.TYPE_MAPPING, 183 exp.DataType.Type.BINARY: "VARBYTE", 184 exp.DataType.Type.BLOB: "VARBYTE", 185 exp.DataType.Type.INT: "INTEGER", 186 exp.DataType.Type.TIMETZ: "TIME", 187 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 188 exp.DataType.Type.VARBINARY: "VARBYTE", 189 exp.DataType.Type.ROWVERSION: "VARBYTE", 190 } 191 192 TRANSFORMS = { 193 **Postgres.Generator.TRANSFORMS, 194 exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CONCAT"), 195 exp.Concat: concat_to_dpipe_sql, 196 exp.ConcatWs: concat_ws_to_dpipe_sql, 197 exp.ApproxDistinct: lambda self, 198 e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})", 199 exp.CurrentTimestamp: lambda self, e: ( 200 "SYSDATE" if e.args.get("sysdate") else "GETDATE()" 201 ), 202 exp.DateAdd: date_delta_sql("DATEADD"), 203 exp.DateDiff: date_delta_sql("DATEDIFF"), 204 exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this), 205 exp.DistStyleProperty: lambda self, e: self.naked_property(e), 206 exp.Explode: lambda self, e: self.explode_sql(e), 207 exp.FarmFingerprint: rename_func("FARMFINGERPRINT64"), 208 exp.FromBase: rename_func("STRTOL"), 209 exp.GeneratedAsIdentityColumnConstraint: 
generatedasidentitycolumnconstraint_sql, 210 exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"), 211 exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"), 212 exp.GroupConcat: rename_func("LISTAGG"), 213 exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))), 214 exp.RegexpExtract: rename_func("REGEXP_SUBSTR"), 215 exp.Select: transforms.preprocess( 216 [ 217 transforms.eliminate_window_clause, 218 transforms.eliminate_distinct_on, 219 transforms.eliminate_semi_and_anti_joins, 220 transforms.unqualify_unnest, 221 transforms.unnest_generate_date_array_using_recursive_cte, 222 ] 223 ), 224 exp.SortKeyProperty: lambda self, 225 e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", 226 exp.StartsWith: lambda self, 227 e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'", 228 exp.StringToArray: rename_func("SPLIT_TO_ARRAY"), 229 exp.TableSample: no_tablesample_sql, 230 exp.TsOrDsAdd: date_delta_sql("DATEADD"), 231 exp.TsOrDsDiff: date_delta_sql("DATEDIFF"), 232 exp.UnixToTime: lambda self, e: self._unix_to_time_sql(e), 233 exp.SHA2Digest: lambda self, e: self.func( 234 "SHA2", e.this, e.args.get("length") or exp.Literal.number(256) 235 ), 236 } 237 238 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots 239 TRANSFORMS.pop(exp.Pivot) 240 241 # Postgres doesn't support JSON_PARSE, but Redshift does 242 TRANSFORMS.pop(exp.ParseJSON) 243 244 # Redshift supports these functions 245 TRANSFORMS.pop(exp.AnyValue) 246 TRANSFORMS.pop(exp.LastDay) 247 TRANSFORMS.pop(exp.SHA2) 248 249 # Postgres and Redshift have different semantics for Getbit 250 TRANSFORMS.pop(exp.Getbit) 251 252 # Postgres does not permit a double precision argument in ROUND; Redshift does 253 TRANSFORMS.pop(exp.Round) 254 255 RESERVED_KEYWORDS = { 256 "aes128", 257 "aes256", 258 "all", 259 "allowoverwrite", 260 "analyse", 261 "analyze", 262 "and", 263 "any", 264 "array", 265 "as", 266 
"asc", 267 "authorization", 268 "az64", 269 "backup", 270 "between", 271 "binary", 272 "blanksasnull", 273 "both", 274 "bytedict", 275 "bzip2", 276 "case", 277 "cast", 278 "check", 279 "collate", 280 "column", 281 "constraint", 282 "create", 283 "credentials", 284 "cross", 285 "current_date", 286 "current_time", 287 "current_timestamp", 288 "current_user", 289 "current_user_id", 290 "default", 291 "deferrable", 292 "deflate", 293 "defrag", 294 "delta", 295 "delta32k", 296 "desc", 297 "disable", 298 "distinct", 299 "do", 300 "else", 301 "emptyasnull", 302 "enable", 303 "encode", 304 "encrypt ", 305 "encryption", 306 "end", 307 "except", 308 "explicit", 309 "false", 310 "for", 311 "foreign", 312 "freeze", 313 "from", 314 "full", 315 "globaldict256", 316 "globaldict64k", 317 "grant", 318 "group", 319 "gzip", 320 "having", 321 "identity", 322 "ignore", 323 "ilike", 324 "in", 325 "initially", 326 "inner", 327 "intersect", 328 "interval", 329 "into", 330 "is", 331 "isnull", 332 "join", 333 "leading", 334 "left", 335 "like", 336 "limit", 337 "localtime", 338 "localtimestamp", 339 "lun", 340 "luns", 341 "lzo", 342 "lzop", 343 "minus", 344 "mostly16", 345 "mostly32", 346 "mostly8", 347 "natural", 348 "new", 349 "not", 350 "notnull", 351 "null", 352 "nulls", 353 "off", 354 "offline", 355 "offset", 356 "oid", 357 "old", 358 "on", 359 "only", 360 "open", 361 "or", 362 "order", 363 "outer", 364 "overlaps", 365 "parallel", 366 "partition", 367 "percent", 368 "permissions", 369 "pivot", 370 "placing", 371 "primary", 372 "raw", 373 "readratio", 374 "recover", 375 "references", 376 "rejectlog", 377 "resort", 378 "respect", 379 "restore", 380 "right", 381 "select", 382 "session_user", 383 "similar", 384 "snapshot", 385 "some", 386 "sysdate", 387 "system", 388 "table", 389 "tag", 390 "tdes", 391 "text255", 392 "text32k", 393 "then", 394 "timestamp", 395 "to", 396 "top", 397 "trailing", 398 "true", 399 "truncatecolumns", 400 "type", 401 "union", 402 "unique", 403 "unnest", 404 
"unpivot", 405 "user", 406 "using", 407 "verbose", 408 "wallet", 409 "when", 410 "where", 411 "with", 412 "without", 413 } 414 415 def unnest_sql(self, expression: exp.Unnest) -> str: 416 args = expression.expressions 417 num_args = len(args) 418 419 if num_args != 1: 420 self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}") 421 return "" 422 423 if isinstance(expression.find_ancestor(exp.From, exp.Join, exp.Select), exp.Select): 424 self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses") 425 return "" 426 427 arg = self.sql(seq_get(args, 0)) 428 429 alias = self.expressions(expression.args.get("alias"), key="columns", flat=True) 430 return f"{arg} AS {alias}" if alias else arg 431 432 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: 433 if expression.is_type(exp.DataType.Type.JSON): 434 # Redshift doesn't support a JSON type, so casting to it is treated as a noop 435 return self.sql(expression, "this") 436 437 return super().cast_sql(expression, safe_prefix=safe_prefix) 438 439 def datatype_sql(self, expression: exp.DataType) -> str: 440 """ 441 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean 442 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type 443 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert 444 `TEXT` to `VARCHAR`. 
445 """ 446 if expression.is_type("text"): 447 expression.set("this", exp.DataType.Type.VARCHAR) 448 precision = expression.args.get("expressions") 449 450 if not precision: 451 expression.append("expressions", exp.var("MAX")) 452 453 return super().datatype_sql(expression) 454 455 def alterset_sql(self, expression: exp.AlterSet) -> str: 456 exprs = self.expressions(expression, flat=True) 457 exprs = f" TABLE PROPERTIES ({exprs})" if exprs else "" 458 location = self.sql(expression, "location") 459 location = f" LOCATION {location}" if location else "" 460 file_format = self.expressions(expression, key="file_format", flat=True, sep=" ") 461 file_format = f" FILE FORMAT {file_format}" if file_format else "" 462 463 return f"SET{exprs}{location}{file_format}" 464 465 def array_sql(self, expression: exp.Array) -> str: 466 if expression.args.get("bracket_notation"): 467 return super().array_sql(expression) 468 469 return rename_func("ARRAY")(self, expression) 470 471 def explode_sql(self, expression: exp.Explode) -> str: 472 self.unsupported("Unsupported EXPLODE() function") 473 return "" 474 475 def _unix_to_time_sql(self, expression: exp.UnixToTime) -> str: 476 scale = expression.args.get("scale") 477 this = self.sql(expression.this) 478 479 if scale is not None and scale != exp.UnixToTime.SECONDS and scale.is_int: 480 this = f"({this} / POWER(10, {scale.to_py()}))" 481 482 return f"(TIMESTAMP 'epoch' + {this} * INTERVAL '1 SECOND')"
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True: Always quote except for special cases. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
415 def unnest_sql(self, expression: exp.Unnest) -> str: 416 args = expression.expressions 417 num_args = len(args) 418 419 if num_args != 1: 420 self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}") 421 return "" 422 423 if isinstance(expression.find_ancestor(exp.From, exp.Join, exp.Select), exp.Select): 424 self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses") 425 return "" 426 427 arg = self.sql(seq_get(args, 0)) 428 429 alias = self.expressions(expression.args.get("alias"), key="columns", flat=True) 430 return f"{arg} AS {alias}" if alias else arg
432 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: 433 if expression.is_type(exp.DataType.Type.JSON): 434 # Redshift doesn't support a JSON type, so casting to it is treated as a noop 435 return self.sql(expression, "this") 436 437 return super().cast_sql(expression, safe_prefix=safe_prefix)
439 def datatype_sql(self, expression: exp.DataType) -> str: 440 """ 441 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean 442 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type 443 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert 444 `TEXT` to `VARCHAR`. 445 """ 446 if expression.is_type("text"): 447 expression.set("this", exp.DataType.Type.VARCHAR) 448 precision = expression.args.get("expressions") 449 450 if not precision: 451 expression.append("expressions", exp.var("MAX")) 452 453 return super().datatype_sql(expression)
Redshift converts the TEXT data type to VARCHAR(255) by default when people more generally mean
VARCHAR of max length which is VARCHAR(max) in Redshift. Therefore if we get a TEXT data type
without precision we convert it to VARCHAR(max) and if it does have precision then we just convert
TEXT to VARCHAR.
455 def alterset_sql(self, expression: exp.AlterSet) -> str: 456 exprs = self.expressions(expression, flat=True) 457 exprs = f" TABLE PROPERTIES ({exprs})" if exprs else "" 458 location = self.sql(expression, "location") 459 location = f" LOCATION {location}" if location else "" 460 file_format = self.expressions(expression, key="file_format", flat=True, sep=" ") 461 file_format = f" FILE FORMAT {file_format}" if file_format else "" 462 463 return f"SET{exprs}{location}{file_format}"
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_ONLY_LITERALS
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- EXTRACT_ALLOWS_QUOTES
- ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
- UNNEST_WITH_ORDINALITY
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_KEYWORDS
- TABLESAMPLE_WITH_METHOD
- COLLATE_IS_FUNC
- DATA_TYPE_SPECIFIERS_ALLOWED
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- JSON_KEY_VALUE_PAIR_SEP
- INSERT_OVERWRITE
- SUPPORTS_CREATE_TABLE_LIKE
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- SUPPORTS_TO_NUMBER
- SET_OP_MODIFIERS
- COPY_PARAMS_EQ_REQUIRED
- UNICODE_SUBSTITUTE
- STAR_EXCEPT
- QUOTE_JSON_PATH
- PAD_FILL_PATTERN_IS_REQUIRED
- SUPPORTS_EXPLODING_PROJECTIONS
- SUPPORTS_UNIX_SECONDS
- ALTER_SET_WRAPPED
- NORMALIZE_EXTRACT_DATE_PARTS
- ARRAY_SIZE_NAME
- SUPPORTS_LIKE_QUANTIFIERS
- MATCH_AGAINST_TABLE_PREFIX
- SET_ASSIGNMENT_REQUIRES_VARIABLE_KEYWORD
- UPDATE_STATEMENT_SUPPORTS_FROM
- UNSUPPORTED_TYPES
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS
- SAFE_JSON_PATH_KEY_RE
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- sanitize_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- pseudocolumn_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- inoutcolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- clone_sql
- describe_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- directory_sql
- delete_sql
- drop_sql
- set_operation
- set_operations
- fetch_sql
- limitoptions_sql
- filter_sql
- hint_sql
- indexparameters_sql
- index_sql
- identifier_sql
- hex_sql
- lowerhex_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- with_properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablefromrows_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- groupingsets_sql
- rollup_sql
- cube_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- queryband_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- booland_sql
- boolor_sql
- order_sql
- withfill_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- options_modifier
- for_modifiers
- queryoption_sql
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- subquery_sql
- qualify_sql
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- formatphrase_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- strtotime_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- alterindex_sql
- alterdiststyle_sql
- altersortkey_sql
- alterrename_sql
- renamecolumn_sql
- alter_sql
- altersession_sql
- add_column_sql
- droppartition_sql
- addconstraint_sql
- addpartition_sql
- distinct_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- safedivide_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- is_sql
- like_sql
- ilike_sql
- match_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- sub_sql
- trycast_sql
- jsoncast_sql
- try_sql
- log_sql
- use_sql
- binary
- ceil_floor
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- whens_sql
- merge_sql
- tochar_sql
- tonumber_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- duplicatekeyproperty_sql
- uniquekeyproperty_sql
- distributedbyproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- generateembedding_sql
- mltranslate_sql
- mlforecast_sql
- featuresattime_sql
- vectorsearch_sql
- forin_sql
- refresh_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodatetime_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- struct_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- copyparameter_sql
- credentials_sql
- copy_sql
- semicolon_sql
- datadeletionproperty_sql
- maskingpolicycolumnconstraint_sql
- gapfill_sql
- scope_resolution
- scoperesolution_sql
- parsejson_sql
- rand_sql
- changes_sql
- pad_sql
- summarize_sql
- explodinggenerateseries_sql
- arrayconcat_sql
- converttimezone_sql
- json_sql
- jsonvalue_sql
- conditionalinsert_sql
- multitableinserts_sql
- oncondition_sql
- jsonextractquote_sql
- jsonexists_sql
- arrayagg_sql
- slice_sql
- apply_sql
- grant_sql
- revoke_sql
- grantprivilege_sql
- grantprincipal_sql
- columns_sql
- overlay_sql
- todouble_sql
- string_sql
- median_sql
- overflowtruncatebehavior_sql
- unixseconds_sql
- arraysize_sql
- attach_sql
- detach_sql
- attachoption_sql
- watermarkcolumnconstraint_sql
- encodeproperty_sql
- includeproperty_sql
- xmlelement_sql
- xmlkeyvalueoption_sql
- partitionbyrangeproperty_sql
- partitionbyrangepropertydynamic_sql
- unpivotcolumns_sql
- analyzesample_sql
- analyzestatistics_sql
- analyzehistogram_sql
- analyzedelete_sql
- analyzelistchainedrows_sql
- analyzevalidate_sql
- analyze_sql
- xmltable_sql
- xmlnamespace_sql
- export_sql
- declare_sql
- declareitem_sql
- recursivewithsearch_sql
- parameterizedagg_sql
- anonymousaggfunc_sql
- combinedaggfunc_sql
- combinedparameterizedagg_sql
- show_sql
- install_sql
- get_put_sql
- translatecharacters_sql
- decodecase_sql
- semanticview_sql
- getextract_sql
- datefromunixdate_sql
- space_sql
- buildproperty_sql
- refreshtriggerproperty_sql
- modelattribute_sql
- directorystage_sql
- uuid_sql
- initcap_sql
- localtime_sql
- localtimestamp_sql
- weekstart_sql
- chr_sql
- sqlglot.dialects.postgres.Postgres.Generator
- SINGLE_STRING_INTERVAL
- RENAME_TABLE_WITH_DB
- JOIN_HINTS
- TABLE_HINTS
- PARAMETER_TOKEN
- NAMED_PLACEHOLDER_TOKEN
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_SEED_KEYWORD
- SUPPORTS_SELECT_INTO
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- SUPPORTS_UNLOGGED_TABLES
- LIKE_PROPERTY_INSIDE_SCHEMA
- SUPPORTS_WINDOW_EXCLUDE
- COPY_HAS_INTO_KEYWORD
- ARRAY_SIZE_DIM_REQUIRED
- SUPPORTED_JSON_PATH_PARTS
- PROPERTIES_LOCATION
- schemacommentproperty_sql
- commentcolumnconstraint_sql
- bracket_sql
- matchagainst_sql
- computedcolumnconstraint_sql
- isascii_sql
- ignorenulls_sql
- respectnulls_sql
- currentschema_sql
- interval_sql
- placeholder_sql
- arraycontains_sql