sqlglot.dialects.redshift
from __future__ import annotations

import typing as t

from sqlglot import exp, transforms
from sqlglot.dialects.dialect import (
    NormalizationStrategy,
    array_concat_sql,
    concat_to_dpipe_sql,
    concat_ws_to_dpipe_sql,
    date_delta_sql,
    generatedasidentitycolumnconstraint_sql,
    json_extract_segments,
    no_tablesample_sql,
    rename_func,
    map_date_part,
)
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType
from sqlglot.parser import build_convert_timezone

if t.TYPE_CHECKING:
    from sqlglot._typing import E


def _build_date_delta(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
    def _builder(args: t.List) -> E:
        expr = expr_type(
            this=seq_get(args, 2),
            expression=seq_get(args, 1),
            unit=map_date_part(seq_get(args, 0)),
        )
        if expr_type is exp.TsOrDsAdd:
            expr.set("return_type", exp.DataType.build("TIMESTAMP"))

        return expr

    return _builder


class Redshift(Postgres):
    # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    SUPPORTS_USER_DEFINED_TYPES = False
    INDEX_OFFSET = 0
    COPY_PARAMS_ARE_CSV = False
    HEX_LOWERCASE = True
    HAS_DISTINCT_ARRAY_CONSTRUCTORS = True
    COALESCE_COMPARISON_NON_STANDARD = True
    REGEXP_EXTRACT_POSITION_OVERFLOW_RETURNS_NULL = False
    ARRAY_FUNCS_PROPAGATES_NULLS = True

    # ref: https://docs.aws.amazon.com/redshift/latest/dg/r_FORMAT_strings.html
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    TIME_MAPPING = {**Postgres.TIME_MAPPING, "MON": "%b", "HH24": "%H", "HH": "%I"}

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.var("month"),
                return_type=exp.DataType.build("TIMESTAMP"),
            ),
            "CONVERT_TIMEZONE": lambda args: build_convert_timezone(args, "UTC"),
            "DATEADD": _build_date_delta(exp.TsOrDsAdd),
            "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
            "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
            "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
            "GETDATE": exp.CurrentTimestamp.from_arg_list,
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
            ),
            "SPLIT_TO_ARRAY": lambda args: exp.StringToArray(
                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string(",")
            ),
            "STRTOL": exp.FromBase.from_arg_list,
        }
        FUNCTIONS.pop("GET_BIT")

        NO_PAREN_FUNCTION_PARSERS = {
            **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
            "APPROXIMATE": lambda self: self._parse_approximate_count(),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
        }

        SUPPORTS_IMPLICIT_UNNEST = True

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
            consume_pipe: bool = False,
        ) -> t.Optional[exp.Expression]:
            # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
            unpivot = self._match(TokenType.UNPIVOT)
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
            )

            return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table

        def _parse_convert(
            self, strict: bool, safe: t.Optional[bool] = None
        ) -> t.Optional[exp.Expression]:
            to = self._parse_types()
            self._match(TokenType.COMMA)
            this = self._parse_bitwise()
            return self.expression(exp.TryCast, this=this, to=to, safe=safe)

        def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
            index = self._index - 1
            func = self._parse_function()

            if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
                return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
            self._retreat(index)
            return None

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "(+)": TokenType.JOIN_MARKER,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "MINUS": TokenType.EXCEPT,
            "SUPER": TokenType.SUPER,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
            "BINARY VARYING": TokenType.VARBINARY,
        }
        KEYWORDS.pop("VALUES")

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        QUERY_HINTS = False
        VALUES_AS_TABLE = False
        TZ_TO_WITH_TIME_ZONE = True
        NVL2_SUPPORTED = True
        LAST_DAY_SUPPORTS_DATE_PART = False
        CAN_IMPLEMENT_ARRAY_ANY = False
        MULTI_ARG_DISTINCT = True
        COPY_PARAMS_ARE_WRAPPED = False
        HEX_FUNC = "TO_HEX"
        PARSE_JSON_NAME = "JSON_PARSE"
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ALTER_SET_TYPE = "TYPE"
        SUPPORTS_DECODE_CASE = True
        SUPPORTS_BETWEEN_FLAGS = False
        LIMIT_FETCH = "LIMIT"

        # Redshift doesn't have `WITH` as part of its with_properties, so we remove it
        WITH_PROPERTIES_PREFIX = " "

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.BLOB: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.ROWVERSION: "VARBYTE",
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.ArrayConcat: array_concat_sql("ARRAY_CONCAT"),
            exp.Concat: concat_to_dpipe_sql,
            exp.ConcatWs: concat_ws_to_dpipe_sql,
            exp.ApproxDistinct: lambda self,
            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
            exp.CurrentTimestamp: lambda self, e: (
                "SYSDATE" if e.args.get("sysdate") else "GETDATE()"
            ),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.Explode: lambda self, e: self.explode_sql(e),
            exp.FarmFingerprint: rename_func("FARMFINGERPRINT64"),
            exp.FromBase: rename_func("STRTOL"),
            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.RegexpExtract: rename_func("REGEXP_SUBSTR"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_window_clause,
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.unqualify_unnest,
                    transforms.unnest_generate_date_array_using_recursive_cte,
                ]
            ),
            exp.SortKeyProperty: lambda self,
            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
            exp.StartsWith: lambda self,
            e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
            exp.StringToArray: rename_func("SPLIT_TO_ARRAY"),
            exp.TableSample: no_tablesample_sql,
            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: lambda self, e: self._unix_to_time_sql(e),
            exp.SHA2Digest: lambda self, e: self.func(
                "SHA2", e.this, e.args.get("length") or exp.Literal.number(256)
            ),
        }

        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift supports pivots
        TRANSFORMS.pop(exp.Pivot)

        # Postgres doesn't support JSON_PARSE, but Redshift does
        TRANSFORMS.pop(exp.ParseJSON)

        # Redshift supports these functions
        TRANSFORMS.pop(exp.AnyValue)
        TRANSFORMS.pop(exp.LastDay)
        TRANSFORMS.pop(exp.SHA2)

        # Postgres and Redshift have different semantics for Getbit
        TRANSFORMS.pop(exp.Getbit)

        # Postgres does not permit a double precision argument in ROUND; Redshift does
        TRANSFORMS.pop(exp.Round)

        RESERVED_KEYWORDS = {
            "aes128",
            "aes256",
            "all",
            "allowoverwrite",
            "analyse",
            "analyze",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "authorization",
            "az64",
            "backup",
            "between",
            "binary",
            "blanksasnull",
            "both",
            "bytedict",
            "bzip2",
            "case",
            "cast",
            "check",
            "collate",
            "column",
            "constraint",
            "create",
            "credentials",
            "cross",
            "current_date",
            "current_time",
            "current_timestamp",
            "current_user",
            "current_user_id",
            "default",
            "deferrable",
            "deflate",
            "defrag",
            "delta",
            "delta32k",
            "desc",
            "disable",
            "distinct",
            "do",
            "else",
            "emptyasnull",
            "enable",
            "encode",
            "encrypt ",
            "encryption",
            "end",
            "except",
            "explicit",
            "false",
            "for",
            "foreign",
            "freeze",
            "from",
            "full",
            "globaldict256",
            "globaldict64k",
            "grant",
            "group",
            "gzip",
            "having",
            "identity",
            "ignore",
            "ilike",
            "in",
            "initially",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "isnull",
            "join",
            "leading",
            "left",
            "like",
            "limit",
            "localtime",
            "localtimestamp",
            "lun",
            "luns",
            "lzo",
            "lzop",
            "minus",
            "mostly16",
            "mostly32",
            "mostly8",
            "natural",
            "new",
            "not",
            "notnull",
            "null",
            "nulls",
            "off",
            "offline",
            "offset",
            "oid",
            "old",
            "on",
            "only",
            "open",
            "or",
            "order",
            "outer",
            "overlaps",
            "parallel",
            "partition",
            "percent",
            "permissions",
            "pivot",
            "placing",
            "primary",
            "raw",
            "readratio",
            "recover",
            "references",
            "rejectlog",
            "resort",
            "respect",
            "restore",
            "right",
            "select",
            "session_user",
            "similar",
            "snapshot",
            "some",
            "sysdate",
            "system",
            "table",
            "tag",
            "tdes",
            "text255",
            "text32k",
            "then",
            "timestamp",
            "to",
            "top",
            "trailing",
            "true",
            "truncatecolumns",
            "type",
            "union",
            "unique",
            "unnest",
            "unpivot",
            "user",
            "using",
            "verbose",
            "wallet",
            "when",
            "where",
            "with",
            "without",
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            args = expression.expressions
            num_args = len(args)

            if num_args != 1:
                self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
                return ""

            if isinstance(expression.find_ancestor(exp.From, exp.Join, exp.Select), exp.Select):
                self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses")
                return ""

            arg = self.sql(seq_get(args, 0))

            alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
            return f"{arg} AS {alias}" if alias else arg

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            if expression.is_type(exp.DataType.Type.JSON):
                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
                return self.sql(expression, "this")

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(256)` by default, although users
            more often mean a VARCHAR of max length, which is `VARCHAR(MAX)` in Redshift.
            Therefore, if we get a `TEXT` data type without precision, we convert it to
            `VARCHAR(MAX)`; if it does have precision, we just convert `TEXT` to `VARCHAR`.
            """
            if expression.is_type("text"):
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")

                if not precision:
                    expression.append("expressions", exp.var("MAX"))

            return super().datatype_sql(expression)

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            exprs = self.expressions(expression, flat=True)
            exprs = f" TABLE PROPERTIES ({exprs})" if exprs else ""
            location = self.sql(expression, "location")
            location = f" LOCATION {location}" if location else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" FILE FORMAT {file_format}" if file_format else ""

            return f"SET{exprs}{location}{file_format}"

        def array_sql(self, expression: exp.Array) -> str:
            if expression.args.get("bracket_notation"):
                return super().array_sql(expression)

            return rename_func("ARRAY")(self, expression)

        def explode_sql(self, expression: exp.Explode) -> str:
            self.unsupported("Unsupported EXPLODE() function")
            return ""

        def _unix_to_time_sql(self, expression: exp.UnixToTime) -> str:
            scale = expression.args.get("scale")
            this = self.sql(expression.this)

            if scale is not None and scale != exp.UnixToTime.SECONDS and scale.is_int:
                this = f"({this} / POWER(10, {scale.to_py()}))"

            return f"(TIMESTAMP 'epoch' + {this} * INTERVAL '1 SECOND')"
NORMALIZATION_STRATEGY: Specifies the strategy according to which identifiers should be normalized.
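Since the strategy is CASE_INSENSITIVE, identifier normalization lowercases names regardless of quoting. A minimal sketch using sqlglot's `normalize_identifiers` helper:

from sqlglot import parse_one
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers

# With Redshift's case-insensitive strategy, even quoted identifiers are
# normalized to lowercase.
expr = parse_one('SELECT "MixedCase", Plain_Col FROM My_Table', read="redshift")
print(normalize_identifiers(expr, dialect="redshift").sql(dialect="redshift"))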
HAS_DISTINCT_ARRAY_CONSTRUCTORS: Whether the ARRAY constructor is context-sensitive, i.e. in Redshift `ARRAY[1, 2, 3]` != `ARRAY(1, 2, 3)`, as the former is of type `INT[]` while the latter is of type `SUPER`.
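A sketch of the two constructors round-tripping through the dialect; the point is that the two spellings are preserved rather than collapsed into one:

import sqlglot

# Bracket notation builds a typed array, call notation builds a SUPER value,
# so the Redshift dialect keeps the two forms distinct.
print(sqlglot.transpile("SELECT ARRAY[1, 2, 3]", read="redshift", write="redshift")[0])
print(sqlglot.transpile("SELECT ARRAY(1, 2, 3)", read="redshift", write="redshift")[0])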
COALESCE_COMPARISON_NON_STANDARD: Whether COALESCE in comparisons has non-standard NULL semantics. We can't convert `COALESCE(x, 1) = 2` into `NOT x IS NULL AND x = 2` for Redshift, because the two are not always equivalent: if `x` is a NULL that comes from a table, the original comparison evaluates to NULL, even though `FALSE AND NULL` evaluates to FALSE. In standard SQL and most dialects these expressions are equivalent, but Redshift treats table NULLs differently in this context.
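A sketch of the consequence for sqlglot's optimizer, assuming `simplify` consults this flag through its `dialect` argument:

from sqlglot import parse_one
from sqlglot.optimizer.simplify import simplify

sql = "SELECT * FROM t WHERE COALESCE(x, 1) = 2"

# Without a dialect, the optimizer is free to rewrite the COALESCE comparison...
print(simplify(parse_one(sql)).sql())
# ...but for Redshift the rewrite should be suppressed, since a table NULL in x
# makes the original predicate NULL rather than FALSE.
print(simplify(parse_one(sql, read="redshift"), dialect="redshift").sql(dialect="redshift"))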
REGEXP_EXTRACT_POSITION_OVERFLOW_RETURNS_NULL: Whether REGEXP_EXTRACT returns NULL when the position argument exceeds the string length.
ARRAY_FUNCS_PROPAGATES_NULLS: Whether array update functions return NULL when the input array is NULL.
TIME_MAPPING: Associates this dialect's time formats with their equivalent Python `strftime` formats.
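For instance, the `HH24`/`MI`/`SS` tokens in Redshift format strings map onto strftime directives; `sqlglot.time.format_time` applies such a mapping. A small sketch:

from sqlglot.dialects.redshift import Redshift
from sqlglot.time import format_time

# Translate a Redshift format string into Python strftime tokens using the
# dialect's TIME_MAPPING ("HH24" -> "%H", "MI" -> "%M", "SS" -> "%S", ...).
print(format_time("YYYY-MM-DD HH24:MI:SS", Redshift.TIME_MAPPING))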
ESCAPED_SEQUENCES: Mapping of an escaped sequence (e.g. `\n`) to its unescaped version (the actual newline character).
Redshift.Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
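A sketch of two Redshift-specific parses this subclass handles:

import sqlglot

# APPROXIMATE COUNT(DISTINCT ...) is parsed into exp.ApproxDistinct, and the
# parenthesis-free SYSDATE into exp.CurrentTimestamp with sysdate=True.
node = sqlglot.parse_one(
    "SELECT APPROXIMATE COUNT(DISTINCT user_id) FROM events", read="redshift"
)
print(repr(node.selects[0]))
print(sqlglot.transpile("SELECT SYSDATE", read="redshift", write="redshift")[0])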
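The Tokenizer overrides in the source above surface directly when parsing. A minimal sketch (the round-tripped string is illustrative, not verified output):

import sqlglot

# MINUS tokenizes as a set operator (the same token as EXCEPT), and removing
# "#" from SINGLE_TOKENS lets it prefix temp table names like #temp_tbl.
sql = "SELECT a FROM #temp_tbl MINUS SELECT a FROM t2"
print(sqlglot.transpile(sql, read="redshift", write="redshift")[0])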
Redshift.Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True: Always quote except for special cases. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
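A sketch of a few Redshift-specific renderings implied by the mappings above:

import sqlglot

# exp.Hex renders as UPPER(TO_HEX(...)) per the TRANSFORMS entry, and TEXT
# widens to VARCHAR(MAX) via datatype_sql.
print(sqlglot.transpile("SELECT HEX(col)", read="mysql", write="redshift")[0])
print(sqlglot.transpile("CREATE TABLE t (c TEXT)", write="redshift")[0])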
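And a sketch of the JSON handling defined by `cast_sql` and `PARSE_JSON_NAME`:

import sqlglot

# Redshift has no JSON type, so a cast to JSON is dropped as a no-op, while
# exp.ParseJSON renders with Redshift's JSON_PARSE function.
print(sqlglot.transpile("SELECT CAST(x AS JSON)", write="redshift")[0])
print(sqlglot.transpile("SELECT PARSE_JSON(x)", read="snowflake", write="redshift")[0])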