sqlglot.dialects.redshift
from __future__ import annotations

import typing as t

from sqlglot import exp, transforms
from sqlglot.dialects.dialect import (
    NormalizationStrategy,
    concat_to_dpipe_sql,
    concat_ws_to_dpipe_sql,
    date_delta_sql,
    generatedasidentitycolumnconstraint_sql,
    json_extract_segments,
    no_tablesample_sql,
    rename_func,
    map_date_part,
)
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType
from sqlglot.parser import build_convert_timezone

if t.TYPE_CHECKING:
    from sqlglot._typing import E


def _build_date_delta(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
    def _builder(args: t.List) -> E:
        expr = expr_type(
            this=seq_get(args, 2),
            expression=seq_get(args, 1),
            unit=map_date_part(seq_get(args, 0)),
        )
        if expr_type is exp.TsOrDsAdd:
            expr.set("return_type", exp.DataType.build("TIMESTAMP"))

        return expr

    return _builder


class Redshift(Postgres):
    # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    SUPPORTS_USER_DEFINED_TYPES = False
    INDEX_OFFSET = 0
    COPY_PARAMS_ARE_CSV = False
    HEX_LOWERCASE = True
    HAS_DISTINCT_ARRAY_CONSTRUCTORS = True

    # ref: https://docs.aws.amazon.com/redshift/latest/dg/r_FORMAT_strings.html
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    TIME_MAPPING = {**Postgres.TIME_MAPPING, "MON": "%b", "HH24": "%H", "HH": "%I"}

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.var("month"),
                return_type=exp.DataType.build("TIMESTAMP"),
            ),
            "CONVERT_TIMEZONE": lambda args: build_convert_timezone(args, "UTC"),
            "DATEADD": _build_date_delta(exp.TsOrDsAdd),
            "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
            "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
            "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
            "GETDATE": exp.CurrentTimestamp.from_arg_list,
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "SPLIT_TO_ARRAY": lambda args: exp.StringToArray(
                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string(",")
            ),
            "STRTOL": exp.FromBase.from_arg_list,
        }

        NO_PAREN_FUNCTION_PARSERS = {
            **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
            "APPROXIMATE": lambda self: self._parse_approximate_count(),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
        }

        SUPPORTS_IMPLICIT_UNNEST = True

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
            consume_pipe: bool = False,
        ) -> t.Optional[exp.Expression]:
            # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
            unpivot = self._match(TokenType.UNPIVOT)
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
            )

            return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table

        def _parse_convert(
            self, strict: bool, safe: t.Optional[bool] = None
        ) -> t.Optional[exp.Expression]:
            to = self._parse_types()
            self._match(TokenType.COMMA)
            this = self._parse_bitwise()
            return self.expression(exp.TryCast, this=this, to=to, safe=safe)

        def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
            index = self._index - 1
            func = self._parse_function()

            if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
                return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
            self._retreat(index)
            return None

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "(+)": TokenType.JOIN_MARKER,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "MINUS": TokenType.EXCEPT,
            "SUPER": TokenType.SUPER,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
            "BINARY VARYING": TokenType.VARBINARY,
        }
        KEYWORDS.pop("VALUES")

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        QUERY_HINTS = False
        VALUES_AS_TABLE = False
        TZ_TO_WITH_TIME_ZONE = True
        NVL2_SUPPORTED = True
        LAST_DAY_SUPPORTS_DATE_PART = False
        CAN_IMPLEMENT_ARRAY_ANY = False
        MULTI_ARG_DISTINCT = True
        COPY_PARAMS_ARE_WRAPPED = False
        HEX_FUNC = "TO_HEX"
        PARSE_JSON_NAME = "JSON_PARSE"
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ALTER_SET_TYPE = "TYPE"

        # Redshift doesn't have `WITH` as part of its with_properties, so we remove it
        WITH_PROPERTIES_PREFIX = " "

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.BLOB: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.ROWVERSION: "VARBYTE",
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CONCAT"),
            exp.Concat: concat_to_dpipe_sql,
            exp.ConcatWs: concat_ws_to_dpipe_sql,
            exp.ApproxDistinct: lambda self, e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
            exp.CurrentTimestamp: lambda self, e: (
                "SYSDATE" if e.args.get("sysdate") else "GETDATE()"
            ),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.Explode: lambda self, e: self.explode_sql(e),
            exp.FromBase: rename_func("STRTOL"),
            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_window_clause,
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.unqualify_unnest,
                    transforms.unnest_generate_date_array_using_recursive_cte,
                ]
            ),
            exp.SortKeyProperty: lambda self,
            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
            exp.StartsWith: lambda self,
            e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
            exp.StringToArray: rename_func("SPLIT_TO_ARRAY"),
            exp.TableSample: no_tablesample_sql,
            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: lambda self, e: self._unix_to_time_sql(e),
        }

        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift supports pivots
        TRANSFORMS.pop(exp.Pivot)

        # Postgres doesn't support JSON_PARSE, but Redshift does
        TRANSFORMS.pop(exp.ParseJSON)

        # Redshift supports these functions
        TRANSFORMS.pop(exp.AnyValue)
        TRANSFORMS.pop(exp.LastDay)
        TRANSFORMS.pop(exp.SHA2)

        RESERVED_KEYWORDS = {
            "aes128",
            "aes256",
            "all",
            "allowoverwrite",
            "analyse",
            "analyze",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "authorization",
            "az64",
            "backup",
            "between",
            "binary",
            "blanksasnull",
            "both",
            "bytedict",
            "bzip2",
            "case",
            "cast",
            "check",
            "collate",
            "column",
            "constraint",
            "create",
            "credentials",
            "cross",
            "current_date",
            "current_time",
            "current_timestamp",
            "current_user",
            "current_user_id",
            "default",
            "deferrable",
            "deflate",
            "defrag",
            "delta",
            "delta32k",
            "desc",
            "disable",
            "distinct",
            "do",
            "else",
            "emptyasnull",
            "enable",
            "encode",
            "encrypt ",
            "encryption",
            "end",
            "except",
            "explicit",
            "false",
            "for",
            "foreign",
            "freeze",
            "from",
            "full",
            "globaldict256",
            "globaldict64k",
            "grant",
            "group",
            "gzip",
            "having",
            "identity",
            "ignore",
            "ilike",
            "in",
            "initially",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "isnull",
            "join",
            "leading",
            "left",
            "like",
            "limit",
            "localtime",
            "localtimestamp",
            "lun",
            "luns",
            "lzo",
            "lzop",
            "minus",
            "mostly16",
            "mostly32",
            "mostly8",
            "natural",
            "new",
            "not",
            "notnull",
            "null",
            "nulls",
            "off",
            "offline",
            "offset",
            "oid",
            "old",
            "on",
            "only",
            "open",
            "or",
            "order",
            "outer",
            "overlaps",
            "parallel",
            "partition",
            "percent",
            "permissions",
            "pivot",
            "placing",
            "primary",
            "raw",
            "readratio",
            "recover",
            "references",
            "rejectlog",
            "resort",
            "respect",
            "restore",
            "right",
            "select",
            "session_user",
            "similar",
            "snapshot",
            "some",
            "sysdate",
            "system",
            "table",
            "tag",
            "tdes",
            "text255",
            "text32k",
            "then",
            "timestamp",
            "to",
            "top",
            "trailing",
            "true",
            "truncatecolumns",
            "type",
            "union",
            "unique",
            "unnest",
            "unpivot",
            "user",
            "using",
            "verbose",
            "wallet",
            "when",
            "where",
            "with",
            "without",
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            args = expression.expressions
            num_args = len(args)

            if num_args != 1:
                self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
                return ""

            if isinstance(expression.find_ancestor(exp.From, exp.Join, exp.Select), exp.Select):
                self.unsupported("Unsupported UNNEST when not used in FROM/JOIN clauses")
                return ""

            arg = self.sql(seq_get(args, 0))

            alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
            return f"{arg} AS {alias}" if alias else arg

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            if expression.is_type(exp.DataType.Type.JSON):
                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
                return self.sql(expression, "this")

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
            `TEXT` to `VARCHAR`.
            """
            if expression.is_type("text"):
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")

                if not precision:
                    expression.append("expressions", exp.var("MAX"))

            return super().datatype_sql(expression)

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            exprs = self.expressions(expression, flat=True)
            exprs = f" TABLE PROPERTIES ({exprs})" if exprs else ""
            location = self.sql(expression, "location")
            location = f" LOCATION {location}" if location else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" FILE FORMAT {file_format}" if file_format else ""

            return f"SET{exprs}{location}{file_format}"

        def array_sql(self, expression: exp.Array) -> str:
            if expression.args.get("bracket_notation"):
                return super().array_sql(expression)

            return rename_func("ARRAY")(self, expression)

        def explode_sql(self, expression: exp.Explode) -> str:
            self.unsupported("Unsupported EXPLODE() function")
            return ""

        def _unix_to_time_sql(self, expression: exp.UnixToTime) -> str:
            scale = expression.args.get("scale")
            this = self.sql(expression.this)

            if scale is not None and scale != exp.UnixToTime.SECONDS and scale.is_int:
                this = f"({this} / POWER(10, {scale.to_py()}))"

            return f"(TIMESTAMP 'epoch' + {this} * INTERVAL '1 SECOND')"
class Redshift(sqlglot.dialects.postgres.Postgres):
NORMALIZATION_STRATEGY: Specifies the strategy according to which identifiers should be normalized.
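A brief sketch of what CASE_INSENSITIVE normalization means in practice, using sqlglot's normalize_identifiers helper; "Foo", BAR and Baz are invented identifiers and the output shown is indicative:

from sqlglot import parse_one
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers

# Redshift treats even quoted identifiers as case-insensitive by default, so
# normalization lowercases quoted and unquoted names alike.
expr = parse_one('SELECT "Foo", BAR FROM Baz', read="redshift")
print(normalize_identifiers(expr, dialect="redshift").sql(dialect="redshift"))
# expected, approximately: SELECT "foo", bar FROM baz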
HAS_DISTINCT_ARRAY_CONSTRUCTORS: Whether the ARRAY constructor is context-sensitive, i.e., in Redshift ARRAY[1, 2, 3] != ARRAY(1, 2, 3): the former is of type INT[], whereas the latter is of type SUPER.
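A small sketch of that distinction as it round-trips through sqlglot (see array_sql in the source above, which keys off the bracket_notation arg); exact output may vary by version:

from sqlglot import parse_one

# ARRAY(...) builds a SUPER value while ARRAY[...] is a typed array, so the two
# constructors must not be conflated when regenerating Redshift SQL.
print(parse_one("SELECT ARRAY(1, 2, 3)", read="redshift").sql(dialect="redshift"))
print(parse_one("SELECT ARRAY[1, 2, 3]", read="redshift").sql(dialect="redshift"))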
TIME_MAPPING: Associates this dialect's time formats with their equivalent Python strftime formats.
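For example, a hedged sketch of the mapping in action; created_at is an invented column, and the strftime rendering on the DuckDB side is indicative and may differ slightly across sqlglot versions:

import sqlglot

# HH24/MI/SS come from TIME_MAPPING above ("HH24" -> "%H", etc.).
print(
    sqlglot.transpile(
        "SELECT TO_CHAR(created_at, 'YYYY-MM-DD HH24:MI:SS')",
        read="redshift",
        write="duckdb",
    )[0]
)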
Mapping of an escaped sequence (`\\n`) to its unescaped version (`\n`).
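Relatedly, Redshift strings accept both backslash and doubled-quote escapes (see Tokenizer.STRING_ESCAPES in the source above). A sketch of how that normalizes on output; the re-escaping shown is the expected Postgres convention, not a guaranteed rendering:

import sqlglot

print(sqlglot.transpile(r"SELECT 'it\'s'", read="redshift", write="postgres")[0])
# expected to re-escape with a doubled quote, e.g. SELECT 'it''s'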
class Redshift.Parser(sqlglot.dialects.postgres.Postgres.Parser):
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
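A minimal sketch of these parser options in use, assuming (as sqlglot's top-level API does) that extra keyword arguments are forwarded to the Parser; "SELECT 1 +" is deliberately malformed:

from sqlglot import parse
from sqlglot.errors import ErrorLevel, ParseError

try:
    parse("SELECT 1 +", read="redshift", error_level=ErrorLevel.RAISE, max_errors=3)
except ParseError as e:
    print(e.errors)  # structured details for each collected syntax error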
Inherited Members
- sqlglot.parser.Parser
- Parser
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- ALTERABLES
- ALIAS_TOKENS
- COLON_PLACEHOLDER_TOKENS
- ARRAY_CONSTRUCTORS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- ASSIGNMENT
- DISJUNCTION
- EQUALITY
- COMPARISON
- TERM
- FACTOR
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- EXPRESSION_PARSERS
- UNARY_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- PIPE_SYNTAX_TRANSFORM_PARSERS
- CONSTRAINT_PARSERS
- ALTER_PARSERS
- ALTER_ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- TYPE_CONVERTERS
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- SCHEMA_BINDING_OPTIONS
- PROCEDURE_OPTIONS
- EXECUTE_AS_OPTIONS
- KEY_CONSTRAINT_OPTIONS
- WINDOW_EXCLUDE_OPTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_PREFIX
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- NULL_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- COPY_INTO_VARLEN_OPTIONS
- IS_JSON_PREDICATE_KIND
- ODBC_DATETIME_LITERALS
- ON_CONDITION_TOKENS
- PRIVILEGE_FOLLOW_TOKENS
- DESCRIBE_STYLES
- ANALYZE_STYLES
- ANALYZE_EXPRESSION_PARSERS
- PARTITION_KEYWORDS
- AMBIGUOUS_ALIAS_TOKENS
- OPERATION_MODIFIERS
- RECURSIVE_CTE_SEARCH_KIND
- MODIFIABLES
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- IDENTIFY_PIVOT_STRINGS
- LOG_DEFAULTS_TO_LN
- TABLESAMPLE_CSV
- DEFAULT_SAMPLING_METHOD
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_SET_OP
- SET_OP_MODIFIERS
- NO_PAREN_IF_COMMANDS
- COLON_IS_VARIANT_EXTRACT
- VALUES_FOLLOWED_BY_PAREN
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
- OPTIONAL_ALIAS_TOKEN_CTE
- ALTER_RENAME_REQUIRES_COLUMN
- JOINS_HAVE_EQUAL_PRECEDENCE
- ZONE_AWARE_TIMESTAMP_CONSTRUCTOR
- error_level
- error_message_context
- max_errors
- dialect
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- parse_set_operation
- errors
- sql
class Redshift.Tokenizer(sqlglot.dialects.postgres.Postgres.Tokenizer):
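Two tokenizer quirks worth a sketch: MINUS tokenizes as EXCEPT (see KEYWORDS above), and popping "#" from SINGLE_TOKENS lets it prefix temp-table names. The tables x, y and #tmp are invented, and the outputs are indicative:

import sqlglot

print(sqlglot.transpile("SELECT a FROM x MINUS SELECT a FROM y", read="redshift", write="postgres")[0])
# expected: SELECT a FROM x EXCEPT SELECT a FROM y

print(sqlglot.parse_one("SELECT * FROM #tmp", read="redshift").sql(dialect="redshift"))
# expected to keep the # prefix: SELECT * FROM #tmp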
Inherited Members
- sqlglot.tokens.Tokenizer
- Tokenizer
- RAW_STRINGS
- UNICODE_STRINGS
- IDENTIFIERS
- QUOTES
- IDENTIFIER_ESCAPES
- STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS
- NESTED_COMMENTS
- HINT_START
- TOKENS_PRECEDING_HINT
- WHITE_SPACE
- COMMANDS
- COMMAND_PREFIX_TOKENS
- NUMERIC_LITERALS
- COMMENTS
- dialect
- use_rs_tokenizer
- reset
- tokenize
- tokenize_rs
- size
- sql
- tokens
class Redshift.Generator(sqlglot.dialects.postgres.Postgres.Generator):
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
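A short sketch of these generator options; transpile forwards them to the Generator, and the table and columns are invented:

import sqlglot

print(
    sqlglot.transpile(
        "SELECT a, b FROM t WHERE x = 1 AND y = 2",
        read="redshift",
        write="redshift",
        pretty=True,
        identify=True,
    )[0]
)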
def unnest_sql(self, expression: exp.Unnest) -> str:
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
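A sketch of the JSON-cast noop described in the source: since Redshift has SUPER rather than a JSON type, the cast is dropped and the operand survives unchanged. "payload" is an invented column name:

import sqlglot

print(sqlglot.transpile("SELECT CAST(payload AS JSON)", read="postgres", write="redshift")[0])
# expected: SELECT payload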
def datatype_sql(self, expression: exp.DataType) -> str:
Redshift converts the `TEXT` data type to `VARCHAR(255)` by default, although what is generally meant is a VARCHAR of maximum length, which is `VARCHAR(max)` in Redshift. Therefore, a `TEXT` data type without precision is converted to `VARCHAR(max)`, and a `TEXT` with precision is simply converted to `VARCHAR`.
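A sketch of that handling; t and c are invented names:

import sqlglot

print(sqlglot.transpile("CREATE TABLE t (c TEXT)", read="redshift", write="redshift")[0])
# expected: CREATE TABLE t (c VARCHAR(MAX))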
def alterset_sql(self, expression: exp.AlterSet) -> str:
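A hedged sketch of a Spectrum-style change matching the SET ... LOCATION branch above; the table and S3 path are invented, and this assumes the statement parses under the Redshift dialect in the installed sqlglot version:

from sqlglot import parse_one

stmt = parse_one("ALTER TABLE spectrum.sales SET LOCATION 's3://bucket/prefix/'", read="redshift")
print(stmt.sql(dialect="redshift"))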
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_FETCH
- LIMIT_ONLY_LITERALS
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- EXTRACT_ALLOWS_QUOTES
- ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
- UNNEST_WITH_ORDINALITY
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_KEYWORDS
- TABLESAMPLE_WITH_METHOD
- COLLATE_IS_FUNC
- DATA_TYPE_SPECIFIERS_ALLOWED
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- JSON_KEY_VALUE_PAIR_SEP
- INSERT_OVERWRITE
- SUPPORTS_CREATE_TABLE_LIKE
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- SUPPORTS_TO_NUMBER
- SET_OP_MODIFIERS
- COPY_PARAMS_EQ_REQUIRED
- STAR_EXCEPT
- QUOTE_JSON_PATH
- PAD_FILL_PATTERN_IS_REQUIRED
- SUPPORTS_EXPLODING_PROJECTIONS
- SUPPORTS_UNIX_SECONDS
- ALTER_SET_WRAPPED
- NORMALIZE_EXTRACT_DATE_PARTS
- ARRAY_SIZE_NAME
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- NAMED_PLACEHOLDER_TOKEN
- EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- sanitize_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- clone_sql
- describe_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- directory_sql
- delete_sql
- drop_sql
- set_operation
- set_operations
- fetch_sql
- limitoptions_sql
- filter_sql
- hint_sql
- indexparameters_sql
- index_sql
- identifier_sql
- hex_sql
- lowerhex_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- with_properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablefromrows_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- groupingsets_sql
- rollup_sql
- cube_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- withfill_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- options_modifier
- for_modifiers
- queryoption_sql
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- alterindex_sql
- alterdiststyle_sql
- altersortkey_sql
- alterrename_sql
- renamecolumn_sql
- alter_sql
- add_column_sql
- droppartition_sql
- addconstraint_sql
- addpartition_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- safedivide_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- slice_sql
- sub_sql
- trycast_sql
- jsoncast_sql
- try_sql
- log_sql
- use_sql
- binary
- ceil_floor
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- whens_sql
- merge_sql
- tochar_sql
- tonumber_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- duplicatekeyproperty_sql
- uniquekeyproperty_sql
- distributedbyproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- forin_sql
- refresh_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodatetime_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- struct_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- copyparameter_sql
- credentials_sql
- copy_sql
- semicolon_sql
- datadeletionproperty_sql
- maskingpolicycolumnconstraint_sql
- gapfill_sql
- scope_resolution
- scoperesolution_sql
- parsejson_sql
- rand_sql
- changes_sql
- pad_sql
- summarize_sql
- explodinggenerateseries_sql
- arrayconcat_sql
- converttimezone_sql
- json_sql
- jsonvalue_sql
- conditionalinsert_sql
- multitableinserts_sql
- oncondition_sql
- jsonextractquote_sql
- jsonexists_sql
- arrayagg_sql
- apply_sql
- grant_sql
- grantprivilege_sql
- grantprincipal_sql
- columns_sql
- overlay_sql
- todouble_sql
- string_sql
- median_sql
- overflowtruncatebehavior_sql
- unixseconds_sql
- arraysize_sql
- attach_sql
- detach_sql
- attachoption_sql
- featuresattime_sql
- watermarkcolumnconstraint_sql
- encodeproperty_sql
- includeproperty_sql
- xmlelement_sql
- xmlkeyvalueoption_sql
- partitionbyrangeproperty_sql
- partitionbyrangepropertydynamic_sql
- unpivotcolumns_sql
- analyzesample_sql
- analyzestatistics_sql
- analyzehistogram_sql
- analyzedelete_sql
- analyzelistchainedrows_sql
- analyzevalidate_sql
- analyze_sql
- xmltable_sql
- xmlnamespace_sql
- export_sql
- declare_sql
- declareitem_sql
- recursivewithsearch_sql
- parameterizedagg_sql
- anonymousaggfunc_sql
- combinedaggfunc_sql
- combinedparameterizedagg_sql
- show_sql
- get_put_sql
- translatecharacters_sql
- sqlglot.dialects.postgres.Postgres.Generator
- SINGLE_STRING_INTERVAL
- RENAME_TABLE_WITH_DB
- JOIN_HINTS
- TABLE_HINTS
- PARAMETER_TOKEN
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_SEED_KEYWORD
- SUPPORTS_SELECT_INTO
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- SUPPORTS_UNLOGGED_TABLES
- LIKE_PROPERTY_INSIDE_SCHEMA
- SUPPORTS_WINDOW_EXCLUDE
- COPY_HAS_INTO_KEYWORD
- ARRAY_SIZE_DIM_REQUIRED
- SUPPORTED_JSON_PATH_PARTS
- PROPERTIES_LOCATION
- schemacommentproperty_sql
- commentcolumnconstraint_sql
- bracket_sql
- matchagainst_sql
- computedcolumnconstraint_sql
- isascii_sql
- currentschema_sql
- interval_sql