sqlglot.dialects.oracle
1from __future__ import annotations 2 3import typing as t 4 5from sqlglot import exp, generator, parser, tokens, transforms 6from sqlglot.dialects.dialect import ( 7 Dialect, 8 NormalizationStrategy, 9 build_timetostr_or_tochar, 10 build_formatted_time, 11 no_ilike_sql, 12 rename_func, 13 strposition_sql, 14 to_number_with_nls_param, 15 trim_sql, 16) 17from sqlglot.helper import seq_get 18from sqlglot.parser import OPTIONS_TYPE, build_coalesce 19from sqlglot.tokens import TokenType 20 21if t.TYPE_CHECKING: 22 from sqlglot._typing import E 23 24 25def _trim_sql(self: Oracle.Generator, expression: exp.Trim) -> str: 26 position = expression.args.get("position") 27 28 if position and position.upper() in ("LEADING", "TRAILING"): 29 return self.trim_sql(expression) 30 31 return trim_sql(self, expression) 32 33 34def _build_to_timestamp(args: t.List) -> exp.StrToTime | exp.Anonymous: 35 if len(args) == 1: 36 return exp.Anonymous(this="TO_TIMESTAMP", expressions=args) 37 38 return build_formatted_time(exp.StrToTime, "oracle")(args) 39 40 41class Oracle(Dialect): 42 ALIAS_POST_TABLESAMPLE = True 43 LOCKING_READS_SUPPORTED = True 44 TABLESAMPLE_SIZE_IS_PERCENT = True 45 NULL_ORDERING = "nulls_are_large" 46 ON_CONDITION_EMPTY_BEFORE_ERROR = False 47 ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False 48 DISABLES_ALIAS_REF_EXPANSION = True 49 50 # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm 51 NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE 52 53 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212 54 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes 55 TIME_MAPPING = { 56 "D": "%u", # Day of week (1-7) 57 "DAY": "%A", # name of day 58 "DD": "%d", # day of month (1-31) 59 "DDD": "%j", # day of year (1-366) 60 "DY": "%a", # abbreviated name of day 61 "HH": "%I", # Hour of day (1-12) 62 "HH12": "%I", # alias for HH 63 "HH24": "%H", # Hour of day (0-23) 64 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard 65 "MI": "%M", # Minute (0-59) 66 "MM": "%m", # Month (01-12; January = 01) 67 "MON": "%b", # Abbreviated name of month 68 "MONTH": "%B", # Name of month 69 "SS": "%S", # Second (0-59) 70 "WW": "%W", # Week of year (1-53) 71 "YY": "%y", # 15 72 "YYYY": "%Y", # 2015 73 "FF6": "%f", # only 6 digits are supported in python formats 74 } 75 76 PSEUDOCOLUMNS = {"ROWNUM", "ROWID", "OBJECT_ID", "OBJECT_VALUE", "LEVEL"} 77 78 def can_quote(self, identifier: exp.Identifier, identify: str | bool = "safe") -> bool: 79 # Disable quoting for pseudocolumns as it may break queries e.g 80 # `WHERE "ROWNUM" = ...` does not work but `WHERE ROWNUM = ...` does 81 return ( 82 identifier.quoted or not isinstance(identifier.parent, exp.Pseudocolumn) 83 ) and super().can_quote(identifier, identify=identify) 84 85 class Tokenizer(tokens.Tokenizer): 86 VAR_SINGLE_TOKENS = {"@", "$", "#"} 87 88 UNICODE_STRINGS = [ 89 (prefix + q, q) 90 for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES) 91 for prefix in ("U", "u") 92 ] 93 94 NESTED_COMMENTS = False 95 96 KEYWORDS = { 97 **tokens.Tokenizer.KEYWORDS, 98 "(+)": TokenType.JOIN_MARKER, 99 "BINARY_DOUBLE": TokenType.DOUBLE, 100 "BINARY_FLOAT": TokenType.FLOAT, 101 "BULK COLLECT INTO": TokenType.BULK_COLLECT_INTO, 102 "COLUMNS": TokenType.COLUMN, 103 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, 104 "MINUS": TokenType.EXCEPT, 105 "NVARCHAR2": TokenType.NVARCHAR, 106 "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY, 107 "SAMPLE": 
TokenType.TABLE_SAMPLE, 108 "START": TokenType.BEGIN, 109 "TOP": TokenType.TOP, 110 "VARCHAR2": TokenType.VARCHAR, 111 "SYSTIMESTAMP": TokenType.SYSTIMESTAMP, 112 } 113 114 class Parser(parser.Parser): 115 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP} 116 VALUES_FOLLOWED_BY_PAREN = False 117 118 FUNCTIONS = { 119 **parser.Parser.FUNCTIONS, 120 "CONVERT": exp.ConvertToCharset.from_arg_list, 121 "L2_DISTANCE": exp.EuclideanDistance.from_arg_list, 122 "NVL": lambda args: build_coalesce(args, is_nvl=True), 123 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), 124 "TO_CHAR": build_timetostr_or_tochar, 125 "TO_TIMESTAMP": _build_to_timestamp, 126 "TO_DATE": build_formatted_time(exp.StrToDate, "oracle"), 127 "TRUNC": lambda args: exp.DateTrunc( 128 unit=seq_get(args, 1) or exp.Literal.string("DD"), 129 this=seq_get(args, 0), 130 unabbreviate=False, 131 ), 132 } 133 FUNCTIONS.pop("TO_BOOLEAN") 134 135 NO_PAREN_FUNCTION_PARSERS = { 136 **parser.Parser.NO_PAREN_FUNCTION_PARSERS, 137 "NEXT": lambda self: self._parse_next_value_for(), 138 "PRIOR": lambda self: self.expression(exp.Prior, this=self._parse_bitwise()), 139 "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True), 140 "DBMS_RANDOM": lambda self: self._parse_dbms_random(), 141 } 142 143 NO_PAREN_FUNCTIONS = { 144 **parser.Parser.NO_PAREN_FUNCTIONS, 145 TokenType.SYSTIMESTAMP: exp.Systimestamp, 146 } 147 148 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { 149 **parser.Parser.FUNCTION_PARSERS, 150 "JSON_ARRAY": lambda self: self._parse_json_array( 151 exp.JSONArray, 152 expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())), 153 ), 154 "JSON_ARRAYAGG": lambda self: self._parse_json_array( 155 exp.JSONArrayAgg, 156 this=self._parse_format_json(self._parse_bitwise()), 157 order=self._parse_order(), 158 ), 159 "JSON_EXISTS": lambda self: self._parse_json_exists(), 160 } 161 FUNCTION_PARSERS.pop("CONVERT") 162 163 PROPERTY_PARSERS = { 164 **parser.Parser.PROPERTY_PARSERS, 165 "GLOBAL": lambda self: self._match_text_seq("TEMPORARY") 166 and self.expression(exp.TemporaryProperty, this="GLOBAL"), 167 "PRIVATE": lambda self: self._match_text_seq("TEMPORARY") 168 and self.expression(exp.TemporaryProperty, this="PRIVATE"), 169 "FORCE": lambda self: self.expression(exp.ForceProperty), 170 } 171 172 QUERY_MODIFIER_PARSERS = { 173 **parser.Parser.QUERY_MODIFIER_PARSERS, 174 TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()), 175 TokenType.WITH: lambda self: ("options", [self._parse_query_restrictions()]), 176 } 177 178 TYPE_LITERAL_PARSERS = { 179 exp.DataType.Type.DATE: lambda self, this, _: self.expression( 180 exp.DateStrToDate, this=this 181 ), 182 # https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/NLS_TIMESTAMP_FORMAT.html 183 exp.DataType.Type.TIMESTAMP: lambda self, this, _: _build_to_timestamp( 184 [this, '"%Y-%m-%d %H:%M:%S.%f"'] 185 ), 186 } 187 188 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT .. 
189 # Reference: https://stackoverflow.com/a/336455 190 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE} 191 192 QUERY_RESTRICTIONS: OPTIONS_TYPE = { 193 "WITH": ( 194 ("READ", "ONLY"), 195 ("CHECK", "OPTION"), 196 ), 197 } 198 199 def _parse_dbms_random(self) -> t.Optional[exp.Expression]: 200 if self._match_text_seq(".", "VALUE"): 201 lower, upper = None, None 202 if self._match(TokenType.L_PAREN, advance=False): 203 lower_upper = self._parse_wrapped_csv(self._parse_bitwise) 204 if len(lower_upper) == 2: 205 lower, upper = lower_upper 206 207 return exp.Rand(lower=lower, upper=upper) 208 209 self._retreat(self._index - 1) 210 return None 211 212 def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E: 213 return self.expression( 214 expr_type, 215 null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"), 216 return_type=self._match_text_seq("RETURNING") and self._parse_type(), 217 strict=self._match_text_seq("STRICT"), 218 **kwargs, 219 ) 220 221 def _parse_hint_function_call(self) -> t.Optional[exp.Expression]: 222 if not self._curr or not self._next or self._next.token_type != TokenType.L_PAREN: 223 return None 224 225 this = self._curr.text 226 227 self._advance(2) 228 args = self._parse_hint_args() 229 this = self.expression(exp.Anonymous, this=this, expressions=args) 230 self._match_r_paren(this) 231 return this 232 233 def _parse_hint_args(self): 234 args = [] 235 result = self._parse_var() 236 237 while result: 238 args.append(result) 239 result = self._parse_var() 240 241 return args 242 243 def _parse_query_restrictions(self) -> t.Optional[exp.Expression]: 244 kind = self._parse_var_from_options(self.QUERY_RESTRICTIONS, raise_unmatched=False) 245 246 if not kind: 247 return None 248 249 return self.expression( 250 exp.QueryOption, 251 this=kind, 252 expression=self._match(TokenType.CONSTRAINT) and self._parse_field(), 253 ) 254 255 def _parse_json_exists(self) -> exp.JSONExists: 256 this = self._parse_format_json(self._parse_bitwise()) 257 self._match(TokenType.COMMA) 258 return self.expression( 259 exp.JSONExists, 260 this=this, 261 path=self.dialect.to_json_path(self._parse_bitwise()), 262 passing=self._match_text_seq("PASSING") 263 and self._parse_csv(lambda: self._parse_alias(self._parse_bitwise())), 264 on_condition=self._parse_on_condition(), 265 ) 266 267 def _parse_into(self) -> t.Optional[exp.Into]: 268 # https://docs.oracle.com/en/database/oracle/oracle-database/19/lnpls/SELECT-INTO-statement.html 269 bulk_collect = self._match(TokenType.BULK_COLLECT_INTO) 270 if not bulk_collect and not self._match(TokenType.INTO): 271 return None 272 273 index = self._index 274 275 expressions = self._parse_expressions() 276 if len(expressions) == 1: 277 self._retreat(index) 278 self._match(TokenType.TABLE) 279 return self.expression( 280 exp.Into, this=self._parse_table(schema=True), bulk_collect=bulk_collect 281 ) 282 283 return self.expression(exp.Into, bulk_collect=bulk_collect, expressions=expressions) 284 285 def _parse_connect_with_prior(self): 286 return self._parse_assignment() 287 288 def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: 289 this = super()._parse_column_ops(this) 290 291 if not this: 292 return this 293 294 index = self._index 295 296 # https://docs.oracle.com/en/database/oracle/oracle-database/26/sqlrf/Interval-Expressions.html 297 interval_span = self._parse_interval_span(this) 298 if isinstance(interval_span.args.get("unit"), exp.IntervalSpan): 299 return interval_span 300 301 
self._retreat(index) 302 return this 303 304 def _parse_insert_table(self) -> t.Optional[exp.Expression]: 305 # Oracle does not use AS for INSERT INTO alias 306 # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/INSERT.html 307 # Parse table parts without schema to avoid parsing the alias with its columns 308 this = self._parse_table_parts(schema=True) 309 310 if isinstance(this, exp.Table): 311 alias_name = self._parse_id_var(any_token=False) 312 if alias_name: 313 this.set("alias", exp.TableAlias(this=alias_name)) 314 315 this.set("partition", self._parse_partition()) 316 317 # Now parse the schema (column list) if present 318 return self._parse_schema(this=this) 319 320 return this 321 322 class Generator(generator.Generator): 323 LOCKING_READS_SUPPORTED = True 324 JOIN_HINTS = False 325 TABLE_HINTS = False 326 DATA_TYPE_SPECIFIERS_ALLOWED = True 327 ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False 328 LIMIT_FETCH = "FETCH" 329 TABLESAMPLE_KEYWORDS = "SAMPLE" 330 LAST_DAY_SUPPORTS_DATE_PART = False 331 SUPPORTS_SELECT_INTO = True 332 TZ_TO_WITH_TIME_ZONE = True 333 SUPPORTS_WINDOW_EXCLUDE = True 334 QUERY_HINT_SEP = " " 335 SUPPORTS_DECODE_CASE = True 336 337 TYPE_MAPPING = { 338 **generator.Generator.TYPE_MAPPING, 339 exp.DataType.Type.TINYINT: "SMALLINT", 340 exp.DataType.Type.SMALLINT: "SMALLINT", 341 exp.DataType.Type.INT: "INT", 342 exp.DataType.Type.BIGINT: "INT", 343 exp.DataType.Type.DECIMAL: "NUMBER", 344 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", 345 exp.DataType.Type.VARCHAR: "VARCHAR2", 346 exp.DataType.Type.NVARCHAR: "NVARCHAR2", 347 exp.DataType.Type.NCHAR: "NCHAR", 348 exp.DataType.Type.TEXT: "CLOB", 349 exp.DataType.Type.TIMETZ: "TIME", 350 exp.DataType.Type.TIMESTAMPNTZ: "TIMESTAMP", 351 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 352 exp.DataType.Type.BINARY: "BLOB", 353 exp.DataType.Type.VARBINARY: "BLOB", 354 exp.DataType.Type.ROWVERSION: "BLOB", 355 } 356 TYPE_MAPPING.pop(exp.DataType.Type.BLOB) 357 358 TRANSFORMS = { 359 **generator.Generator.TRANSFORMS, 360 exp.DateStrToDate: lambda self, e: self.func( 361 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD") 362 ), 363 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.unit), 364 exp.EuclideanDistance: rename_func("L2_DISTANCE"), 365 exp.ILike: no_ilike_sql, 366 exp.LogicalOr: rename_func("MAX"), 367 exp.LogicalAnd: rename_func("MIN"), 368 exp.Mod: rename_func("MOD"), 369 exp.Rand: rename_func("DBMS_RANDOM.VALUE"), 370 exp.Select: transforms.preprocess( 371 [ 372 transforms.eliminate_distinct_on, 373 transforms.eliminate_qualify, 374 ] 375 ), 376 exp.StrPosition: lambda self, e: ( 377 strposition_sql( 378 self, e, func_name="INSTR", supports_position=True, supports_occurrence=True 379 ) 380 ), 381 exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)), 382 exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)), 383 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), 384 exp.Substring: rename_func("SUBSTR"), 385 exp.Table: lambda self, e: self.table_sql(e, sep=" "), 386 exp.TableSample: lambda self, e: self.tablesample_sql(e), 387 exp.TemporaryProperty: lambda _, e: f"{e.name or 'GLOBAL'} TEMPORARY", 388 exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)), 389 exp.ToChar: lambda self, e: self.function_fallback_sql(e), 390 exp.ToNumber: to_number_with_nls_param, 391 exp.Trim: _trim_sql, 392 exp.Unicode: lambda self, e: f"ASCII(UNISTR({self.sql(e.this)}))", 393 exp.UnixToTime: lambda self, 394 e: 
f"TO_DATE('1970-01-01', 'YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)", 395 exp.UtcTimestamp: rename_func("UTC_TIMESTAMP"), 396 exp.UtcTime: rename_func("UTC_TIME"), 397 exp.Systimestamp: lambda self, e: "SYSTIMESTAMP", 398 } 399 400 PROPERTIES_LOCATION = { 401 **generator.Generator.PROPERTIES_LOCATION, 402 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 403 } 404 405 def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str: 406 if expression.args.get("sysdate"): 407 return "SYSDATE" 408 409 this = expression.this 410 return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP" 411 412 def offset_sql(self, expression: exp.Offset) -> str: 413 return f"{super().offset_sql(expression)} ROWS" 414 415 def add_column_sql(self, expression: exp.Expression) -> str: 416 return f"ADD {self.sql(expression)}" 417 418 def queryoption_sql(self, expression: exp.QueryOption) -> str: 419 option = self.sql(expression, "this") 420 value = self.sql(expression, "expression") 421 value = f" CONSTRAINT {value}" if value else "" 422 423 return f"{option}{value}" 424 425 def coalesce_sql(self, expression: exp.Coalesce) -> str: 426 func_name = "NVL" if expression.args.get("is_nvl") else "COALESCE" 427 return rename_func(func_name)(self, expression) 428 429 def into_sql(self, expression: exp.Into) -> str: 430 into = "INTO" if not expression.args.get("bulk_collect") else "BULK COLLECT INTO" 431 if expression.this: 432 return f"{self.seg(into)} {self.sql(expression, 'this')}" 433 434 return f"{self.seg(into)} {self.expressions(expression)}" 435 436 def hint_sql(self, expression: exp.Hint) -> str: 437 expressions = [] 438 439 for expression in expression.expressions: 440 if isinstance(expression, exp.Anonymous): 441 formatted_args = self.format_args(*expression.expressions, sep=" ") 442 expressions.append(f"{self.sql(expression, 'this')}({formatted_args})") 443 else: 444 expressions.append(self.sql(expression)) 445 446 return f" /*+ {self.expressions(sqls=expressions, sep=self.QUERY_HINT_SEP).strip()} */" 447 448 def isascii_sql(self, expression: exp.IsAscii) -> str: 449 return f"NVL(REGEXP_LIKE({self.sql(expression.this)}, '^[' || CHR(1) || '-' || CHR(127) || ']*$'), TRUE)" 450 451 def interval_sql(self, expression: exp.Interval) -> str: 452 return f"{'INTERVAL ' if isinstance(expression.this, exp.Literal) else ''}{self.sql(expression, 'this')} {self.sql(expression, 'unit')}" 453 454 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: 455 param_constraint = expression.find(exp.InOutColumnConstraint) 456 if param_constraint: 457 sep = f" {self.sql(param_constraint)} " 458 param_constraint.pop() 459 return super().columndef_sql(expression, sep)
42class Oracle(Dialect): 43 ALIAS_POST_TABLESAMPLE = True 44 LOCKING_READS_SUPPORTED = True 45 TABLESAMPLE_SIZE_IS_PERCENT = True 46 NULL_ORDERING = "nulls_are_large" 47 ON_CONDITION_EMPTY_BEFORE_ERROR = False 48 ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False 49 DISABLES_ALIAS_REF_EXPANSION = True 50 51 # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm 52 NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE 53 54 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212 55 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes 56 TIME_MAPPING = { 57 "D": "%u", # Day of week (1-7) 58 "DAY": "%A", # name of day 59 "DD": "%d", # day of month (1-31) 60 "DDD": "%j", # day of year (1-366) 61 "DY": "%a", # abbreviated name of day 62 "HH": "%I", # Hour of day (1-12) 63 "HH12": "%I", # alias for HH 64 "HH24": "%H", # Hour of day (0-23) 65 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard 66 "MI": "%M", # Minute (0-59) 67 "MM": "%m", # Month (01-12; January = 01) 68 "MON": "%b", # Abbreviated name of month 69 "MONTH": "%B", # Name of month 70 "SS": "%S", # Second (0-59) 71 "WW": "%W", # Week of year (1-53) 72 "YY": "%y", # 15 73 "YYYY": "%Y", # 2015 74 "FF6": "%f", # only 6 digits are supported in python formats 75 } 76 77 PSEUDOCOLUMNS = {"ROWNUM", "ROWID", "OBJECT_ID", "OBJECT_VALUE", "LEVEL"} 78 79 def can_quote(self, identifier: exp.Identifier, identify: str | bool = "safe") -> bool: 80 # Disable quoting for pseudocolumns as it may break queries e.g 81 # `WHERE "ROWNUM" = ...` does not work but `WHERE ROWNUM = ...` does 82 return ( 83 identifier.quoted or not isinstance(identifier.parent, exp.Pseudocolumn) 84 ) and super().can_quote(identifier, identify=identify) 85 86 class Tokenizer(tokens.Tokenizer): 87 VAR_SINGLE_TOKENS = {"@", "$", "#"} 88 89 UNICODE_STRINGS = [ 90 (prefix + q, q) 91 for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES) 92 for prefix in ("U", "u") 93 ] 94 95 NESTED_COMMENTS = False 96 97 KEYWORDS = { 98 **tokens.Tokenizer.KEYWORDS, 99 "(+)": TokenType.JOIN_MARKER, 100 "BINARY_DOUBLE": TokenType.DOUBLE, 101 "BINARY_FLOAT": TokenType.FLOAT, 102 "BULK COLLECT INTO": TokenType.BULK_COLLECT_INTO, 103 "COLUMNS": TokenType.COLUMN, 104 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, 105 "MINUS": TokenType.EXCEPT, 106 "NVARCHAR2": TokenType.NVARCHAR, 107 "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY, 108 "SAMPLE": TokenType.TABLE_SAMPLE, 109 "START": TokenType.BEGIN, 110 "TOP": TokenType.TOP, 111 "VARCHAR2": TokenType.VARCHAR, 112 "SYSTIMESTAMP": TokenType.SYSTIMESTAMP, 113 } 114 115 class Parser(parser.Parser): 116 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP} 117 VALUES_FOLLOWED_BY_PAREN = False 118 119 FUNCTIONS = { 120 **parser.Parser.FUNCTIONS, 121 "CONVERT": exp.ConvertToCharset.from_arg_list, 122 "L2_DISTANCE": exp.EuclideanDistance.from_arg_list, 123 "NVL": lambda args: build_coalesce(args, is_nvl=True), 124 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), 125 "TO_CHAR": build_timetostr_or_tochar, 126 "TO_TIMESTAMP": _build_to_timestamp, 127 "TO_DATE": build_formatted_time(exp.StrToDate, "oracle"), 128 "TRUNC": lambda args: exp.DateTrunc( 129 unit=seq_get(args, 1) or exp.Literal.string("DD"), 130 this=seq_get(args, 0), 131 unabbreviate=False, 132 ), 133 } 134 FUNCTIONS.pop("TO_BOOLEAN") 135 136 NO_PAREN_FUNCTION_PARSERS = { 137 **parser.Parser.NO_PAREN_FUNCTION_PARSERS, 138 "NEXT": lambda 
self: self._parse_next_value_for(), 139 "PRIOR": lambda self: self.expression(exp.Prior, this=self._parse_bitwise()), 140 "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True), 141 "DBMS_RANDOM": lambda self: self._parse_dbms_random(), 142 } 143 144 NO_PAREN_FUNCTIONS = { 145 **parser.Parser.NO_PAREN_FUNCTIONS, 146 TokenType.SYSTIMESTAMP: exp.Systimestamp, 147 } 148 149 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { 150 **parser.Parser.FUNCTION_PARSERS, 151 "JSON_ARRAY": lambda self: self._parse_json_array( 152 exp.JSONArray, 153 expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())), 154 ), 155 "JSON_ARRAYAGG": lambda self: self._parse_json_array( 156 exp.JSONArrayAgg, 157 this=self._parse_format_json(self._parse_bitwise()), 158 order=self._parse_order(), 159 ), 160 "JSON_EXISTS": lambda self: self._parse_json_exists(), 161 } 162 FUNCTION_PARSERS.pop("CONVERT") 163 164 PROPERTY_PARSERS = { 165 **parser.Parser.PROPERTY_PARSERS, 166 "GLOBAL": lambda self: self._match_text_seq("TEMPORARY") 167 and self.expression(exp.TemporaryProperty, this="GLOBAL"), 168 "PRIVATE": lambda self: self._match_text_seq("TEMPORARY") 169 and self.expression(exp.TemporaryProperty, this="PRIVATE"), 170 "FORCE": lambda self: self.expression(exp.ForceProperty), 171 } 172 173 QUERY_MODIFIER_PARSERS = { 174 **parser.Parser.QUERY_MODIFIER_PARSERS, 175 TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()), 176 TokenType.WITH: lambda self: ("options", [self._parse_query_restrictions()]), 177 } 178 179 TYPE_LITERAL_PARSERS = { 180 exp.DataType.Type.DATE: lambda self, this, _: self.expression( 181 exp.DateStrToDate, this=this 182 ), 183 # https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/NLS_TIMESTAMP_FORMAT.html 184 exp.DataType.Type.TIMESTAMP: lambda self, this, _: _build_to_timestamp( 185 [this, '"%Y-%m-%d %H:%M:%S.%f"'] 186 ), 187 } 188 189 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT .. 
190 # Reference: https://stackoverflow.com/a/336455 191 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE} 192 193 QUERY_RESTRICTIONS: OPTIONS_TYPE = { 194 "WITH": ( 195 ("READ", "ONLY"), 196 ("CHECK", "OPTION"), 197 ), 198 } 199 200 def _parse_dbms_random(self) -> t.Optional[exp.Expression]: 201 if self._match_text_seq(".", "VALUE"): 202 lower, upper = None, None 203 if self._match(TokenType.L_PAREN, advance=False): 204 lower_upper = self._parse_wrapped_csv(self._parse_bitwise) 205 if len(lower_upper) == 2: 206 lower, upper = lower_upper 207 208 return exp.Rand(lower=lower, upper=upper) 209 210 self._retreat(self._index - 1) 211 return None 212 213 def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E: 214 return self.expression( 215 expr_type, 216 null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"), 217 return_type=self._match_text_seq("RETURNING") and self._parse_type(), 218 strict=self._match_text_seq("STRICT"), 219 **kwargs, 220 ) 221 222 def _parse_hint_function_call(self) -> t.Optional[exp.Expression]: 223 if not self._curr or not self._next or self._next.token_type != TokenType.L_PAREN: 224 return None 225 226 this = self._curr.text 227 228 self._advance(2) 229 args = self._parse_hint_args() 230 this = self.expression(exp.Anonymous, this=this, expressions=args) 231 self._match_r_paren(this) 232 return this 233 234 def _parse_hint_args(self): 235 args = [] 236 result = self._parse_var() 237 238 while result: 239 args.append(result) 240 result = self._parse_var() 241 242 return args 243 244 def _parse_query_restrictions(self) -> t.Optional[exp.Expression]: 245 kind = self._parse_var_from_options(self.QUERY_RESTRICTIONS, raise_unmatched=False) 246 247 if not kind: 248 return None 249 250 return self.expression( 251 exp.QueryOption, 252 this=kind, 253 expression=self._match(TokenType.CONSTRAINT) and self._parse_field(), 254 ) 255 256 def _parse_json_exists(self) -> exp.JSONExists: 257 this = self._parse_format_json(self._parse_bitwise()) 258 self._match(TokenType.COMMA) 259 return self.expression( 260 exp.JSONExists, 261 this=this, 262 path=self.dialect.to_json_path(self._parse_bitwise()), 263 passing=self._match_text_seq("PASSING") 264 and self._parse_csv(lambda: self._parse_alias(self._parse_bitwise())), 265 on_condition=self._parse_on_condition(), 266 ) 267 268 def _parse_into(self) -> t.Optional[exp.Into]: 269 # https://docs.oracle.com/en/database/oracle/oracle-database/19/lnpls/SELECT-INTO-statement.html 270 bulk_collect = self._match(TokenType.BULK_COLLECT_INTO) 271 if not bulk_collect and not self._match(TokenType.INTO): 272 return None 273 274 index = self._index 275 276 expressions = self._parse_expressions() 277 if len(expressions) == 1: 278 self._retreat(index) 279 self._match(TokenType.TABLE) 280 return self.expression( 281 exp.Into, this=self._parse_table(schema=True), bulk_collect=bulk_collect 282 ) 283 284 return self.expression(exp.Into, bulk_collect=bulk_collect, expressions=expressions) 285 286 def _parse_connect_with_prior(self): 287 return self._parse_assignment() 288 289 def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: 290 this = super()._parse_column_ops(this) 291 292 if not this: 293 return this 294 295 index = self._index 296 297 # https://docs.oracle.com/en/database/oracle/oracle-database/26/sqlrf/Interval-Expressions.html 298 interval_span = self._parse_interval_span(this) 299 if isinstance(interval_span.args.get("unit"), exp.IntervalSpan): 300 return interval_span 301 302 
self._retreat(index) 303 return this 304 305 def _parse_insert_table(self) -> t.Optional[exp.Expression]: 306 # Oracle does not use AS for INSERT INTO alias 307 # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/INSERT.html 308 # Parse table parts without schema to avoid parsing the alias with its columns 309 this = self._parse_table_parts(schema=True) 310 311 if isinstance(this, exp.Table): 312 alias_name = self._parse_id_var(any_token=False) 313 if alias_name: 314 this.set("alias", exp.TableAlias(this=alias_name)) 315 316 this.set("partition", self._parse_partition()) 317 318 # Now parse the schema (column list) if present 319 return self._parse_schema(this=this) 320 321 return this 322 323 class Generator(generator.Generator): 324 LOCKING_READS_SUPPORTED = True 325 JOIN_HINTS = False 326 TABLE_HINTS = False 327 DATA_TYPE_SPECIFIERS_ALLOWED = True 328 ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False 329 LIMIT_FETCH = "FETCH" 330 TABLESAMPLE_KEYWORDS = "SAMPLE" 331 LAST_DAY_SUPPORTS_DATE_PART = False 332 SUPPORTS_SELECT_INTO = True 333 TZ_TO_WITH_TIME_ZONE = True 334 SUPPORTS_WINDOW_EXCLUDE = True 335 QUERY_HINT_SEP = " " 336 SUPPORTS_DECODE_CASE = True 337 338 TYPE_MAPPING = { 339 **generator.Generator.TYPE_MAPPING, 340 exp.DataType.Type.TINYINT: "SMALLINT", 341 exp.DataType.Type.SMALLINT: "SMALLINT", 342 exp.DataType.Type.INT: "INT", 343 exp.DataType.Type.BIGINT: "INT", 344 exp.DataType.Type.DECIMAL: "NUMBER", 345 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", 346 exp.DataType.Type.VARCHAR: "VARCHAR2", 347 exp.DataType.Type.NVARCHAR: "NVARCHAR2", 348 exp.DataType.Type.NCHAR: "NCHAR", 349 exp.DataType.Type.TEXT: "CLOB", 350 exp.DataType.Type.TIMETZ: "TIME", 351 exp.DataType.Type.TIMESTAMPNTZ: "TIMESTAMP", 352 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 353 exp.DataType.Type.BINARY: "BLOB", 354 exp.DataType.Type.VARBINARY: "BLOB", 355 exp.DataType.Type.ROWVERSION: "BLOB", 356 } 357 TYPE_MAPPING.pop(exp.DataType.Type.BLOB) 358 359 TRANSFORMS = { 360 **generator.Generator.TRANSFORMS, 361 exp.DateStrToDate: lambda self, e: self.func( 362 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD") 363 ), 364 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.unit), 365 exp.EuclideanDistance: rename_func("L2_DISTANCE"), 366 exp.ILike: no_ilike_sql, 367 exp.LogicalOr: rename_func("MAX"), 368 exp.LogicalAnd: rename_func("MIN"), 369 exp.Mod: rename_func("MOD"), 370 exp.Rand: rename_func("DBMS_RANDOM.VALUE"), 371 exp.Select: transforms.preprocess( 372 [ 373 transforms.eliminate_distinct_on, 374 transforms.eliminate_qualify, 375 ] 376 ), 377 exp.StrPosition: lambda self, e: ( 378 strposition_sql( 379 self, e, func_name="INSTR", supports_position=True, supports_occurrence=True 380 ) 381 ), 382 exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)), 383 exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)), 384 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), 385 exp.Substring: rename_func("SUBSTR"), 386 exp.Table: lambda self, e: self.table_sql(e, sep=" "), 387 exp.TableSample: lambda self, e: self.tablesample_sql(e), 388 exp.TemporaryProperty: lambda _, e: f"{e.name or 'GLOBAL'} TEMPORARY", 389 exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)), 390 exp.ToChar: lambda self, e: self.function_fallback_sql(e), 391 exp.ToNumber: to_number_with_nls_param, 392 exp.Trim: _trim_sql, 393 exp.Unicode: lambda self, e: f"ASCII(UNISTR({self.sql(e.this)}))", 394 exp.UnixToTime: lambda self, 395 e: 
f"TO_DATE('1970-01-01', 'YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)", 396 exp.UtcTimestamp: rename_func("UTC_TIMESTAMP"), 397 exp.UtcTime: rename_func("UTC_TIME"), 398 exp.Systimestamp: lambda self, e: "SYSTIMESTAMP", 399 } 400 401 PROPERTIES_LOCATION = { 402 **generator.Generator.PROPERTIES_LOCATION, 403 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 404 } 405 406 def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str: 407 if expression.args.get("sysdate"): 408 return "SYSDATE" 409 410 this = expression.this 411 return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP" 412 413 def offset_sql(self, expression: exp.Offset) -> str: 414 return f"{super().offset_sql(expression)} ROWS" 415 416 def add_column_sql(self, expression: exp.Expression) -> str: 417 return f"ADD {self.sql(expression)}" 418 419 def queryoption_sql(self, expression: exp.QueryOption) -> str: 420 option = self.sql(expression, "this") 421 value = self.sql(expression, "expression") 422 value = f" CONSTRAINT {value}" if value else "" 423 424 return f"{option}{value}" 425 426 def coalesce_sql(self, expression: exp.Coalesce) -> str: 427 func_name = "NVL" if expression.args.get("is_nvl") else "COALESCE" 428 return rename_func(func_name)(self, expression) 429 430 def into_sql(self, expression: exp.Into) -> str: 431 into = "INTO" if not expression.args.get("bulk_collect") else "BULK COLLECT INTO" 432 if expression.this: 433 return f"{self.seg(into)} {self.sql(expression, 'this')}" 434 435 return f"{self.seg(into)} {self.expressions(expression)}" 436 437 def hint_sql(self, expression: exp.Hint) -> str: 438 expressions = [] 439 440 for expression in expression.expressions: 441 if isinstance(expression, exp.Anonymous): 442 formatted_args = self.format_args(*expression.expressions, sep=" ") 443 expressions.append(f"{self.sql(expression, 'this')}({formatted_args})") 444 else: 445 expressions.append(self.sql(expression)) 446 447 return f" /*+ {self.expressions(sqls=expressions, sep=self.QUERY_HINT_SEP).strip()} */" 448 449 def isascii_sql(self, expression: exp.IsAscii) -> str: 450 return f"NVL(REGEXP_LIKE({self.sql(expression.this)}, '^[' || CHR(1) || '-' || CHR(127) || ']*$'), TRUE)" 451 452 def interval_sql(self, expression: exp.Interval) -> str: 453 return f"{'INTERVAL ' if isinstance(expression.this, exp.Literal) else ''}{self.sql(expression, 'this')} {self.sql(expression, 'unit')}" 454 455 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: 456 param_constraint = expression.find(exp.InOutColumnConstraint) 457 if param_constraint: 458 sep = f" {self.sql(param_constraint)} " 459 param_constraint.pop() 460 return super().columndef_sql(expression, sep)
Default NULL ordering method to use if not explicitly set.
Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"
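As a small illustration (nothing beyond the attribute shown in the source above is assumed):

from sqlglot.dialects.oracle import Oracle

# "nulls_are_large" means NULLs compare as the largest values, so an ascending
# ORDER BY places them last and a descending ORDER BY places them first.
print(Oracle.NULL_ORDERING)  # nulls_are_large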
Whether "X ON EMPTY" should come before "X ON ERROR" (for dialects like T-SQL, MySQL, Oracle).
Whether alias reference expansion is disabled for this dialect.
Some dialects like Oracle do NOT support referencing aliases in projections or WHERE clauses. The original expression must be repeated instead.
For example, in Oracle:
SELECT y.foo AS bar, bar * 2 AS baz FROM y    -- INVALID
SELECT y.foo AS bar, y.foo * 2 AS baz FROM y  -- VALID
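A minimal Python sketch tying the flag to the example above; the table name y and its columns are placeholders, and the round-trip output is illustrative:

import sqlglot
from sqlglot.dialects.oracle import Oracle

# Oracle cannot reference the alias `bar` from another projection, so the
# original expression must be repeated; the dialect records this via:
assert Oracle.DISABLES_ALIAS_REF_EXPANSION is True

# The valid form parses and regenerates under the Oracle dialect.
valid = "SELECT y.foo AS bar, y.foo * 2 AS baz FROM y"
print(sqlglot.parse_one(valid, read="oracle").sql(dialect="oracle"))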
Specifies the strategy according to which identifiers should be normalized.
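For instance, a hedged sketch using sqlglot's normalize_identifiers helper (column and table names are placeholders; the exact output may vary):

from sqlglot import parse_one
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers

# Unquoted identifiers are case-insensitive in Oracle and normalize to
# uppercase; quoted identifiers keep their case.
expr = normalize_identifiers(parse_one('SELECT col, "Mixed" FROM tbl', read="oracle"), dialect="oracle")
print(expr.sql(dialect="oracle"))  # roughly: SELECT COL, "Mixed" FROM TBL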
Associates this dialect's time formats with their equivalent Python strftime formats.
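A couple of direct lookups, plus a hedged transpilation example showing where the mapping is applied (the DuckDB target and exact output are illustrative):

import sqlglot
from sqlglot.dialects.oracle import Oracle

# Oracle format tokens map to Python strftime codes.
print(Oracle.TIME_MAPPING["HH24"], Oracle.TIME_MAPPING["MI"], Oracle.TIME_MAPPING["SS"])  # %H %M %S

# TO_CHAR formats are rewritten through this mapping when transpiling.
print(sqlglot.transpile("SELECT TO_CHAR(d, 'YYYY-MM-DD HH24:MI:SS') FROM t", read="oracle", write="duckdb")[0])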
Columns that are auto-generated by the engine corresponding to this dialect.
For example, such columns may be excluded from SELECT * queries.
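A short sketch (the table name t is a placeholder); ROWNUM is one such column and, per can_quote below, stays unquoted:

import sqlglot
from sqlglot.dialects.oracle import Oracle

print("ROWNUM" in Oracle.PSEUDOCOLUMNS)  # True

# Pseudocolumns behave like columns in queries but are generated by the engine.
print(sqlglot.parse_one("SELECT * FROM t WHERE ROWNUM <= 10", read="oracle").sql(dialect="oracle"))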
79 def can_quote(self, identifier: exp.Identifier, identify: str | bool = "safe") -> bool: 80 # Disable quoting for pseudocolumns as it may break queries e.g 81 # `WHERE "ROWNUM" = ...` does not work but `WHERE ROWNUM = ...` does 82 return ( 83 identifier.quoted or not isinstance(identifier.parent, exp.Pseudocolumn) 84 ) and super().can_quote(identifier, identify=identify)
Checks if an identifier can be quoted.
Arguments:
- identifier: The identifier to check.
- identify:
  - True: Always returns True, except for certain cases.
  - "safe": Only returns True if the identifier is case-insensitive.
  - "unsafe": Only returns True if the identifier is case-sensitive.
Returns:
Whether the given identifier can be quoted.
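A hedged usage sketch: an unquoted identifier whose parent is an exp.Pseudocolumn is never quotable, regardless of the identify setting, because the first clause above short-circuits to False:

from sqlglot import exp
from sqlglot.dialects.oracle import Oracle

oracle = Oracle()

# Wrap an unquoted identifier in a pseudocolumn node so that
# identifier.parent is an exp.Pseudocolumn.
pseudo = exp.Pseudocolumn(this=exp.to_identifier("ROWNUM"))
print(oracle.can_quote(pseudo.this, identify=True))  # False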
86 class Tokenizer(tokens.Tokenizer): 87 VAR_SINGLE_TOKENS = {"@", "$", "#"} 88 89 UNICODE_STRINGS = [ 90 (prefix + q, q) 91 for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES) 92 for prefix in ("U", "u") 93 ] 94 95 NESTED_COMMENTS = False 96 97 KEYWORDS = { 98 **tokens.Tokenizer.KEYWORDS, 99 "(+)": TokenType.JOIN_MARKER, 100 "BINARY_DOUBLE": TokenType.DOUBLE, 101 "BINARY_FLOAT": TokenType.FLOAT, 102 "BULK COLLECT INTO": TokenType.BULK_COLLECT_INTO, 103 "COLUMNS": TokenType.COLUMN, 104 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, 105 "MINUS": TokenType.EXCEPT, 106 "NVARCHAR2": TokenType.NVARCHAR, 107 "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY, 108 "SAMPLE": TokenType.TABLE_SAMPLE, 109 "START": TokenType.BEGIN, 110 "TOP": TokenType.TOP, 111 "VARCHAR2": TokenType.VARCHAR, 112 "SYSTIMESTAMP": TokenType.SYSTIMESTAMP, 113 }
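For instance, a small sketch using the dialect's tokenizer: MINUS comes back as an EXCEPT token, per the KEYWORDS table above (table names are placeholders):

from sqlglot.dialects.oracle import Oracle
from sqlglot.tokens import TokenType

tokens = Oracle().tokenize("SELECT a FROM t1 MINUS SELECT a FROM t2")

# The Oracle-specific MINUS keyword is tokenized as EXCEPT.
print(any(tok.token_type == TokenType.EXCEPT for tok in tokens))  # True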
Inherited Members
- sqlglot.tokens.Tokenizer
- Tokenizer
- SINGLE_TOKENS
- BIT_STRINGS
- BYTE_STRINGS
- HEX_STRINGS
- RAW_STRINGS
- HEREDOC_STRINGS
- IDENTIFIERS
- QUOTES
- STRING_ESCAPES
- ESCAPE_FOLLOW_CHARS
- IDENTIFIER_ESCAPES
- HEREDOC_TAG_IS_IDENTIFIER
- HEREDOC_STRING_ALTERNATIVE
- STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS
- HINT_START
- TOKENS_PRECEDING_HINT
- WHITE_SPACE
- COMMANDS
- COMMAND_PREFIX_TOKENS
- NUMERIC_LITERALS
- COMMENTS
- dialect
- use_rs_tokenizer
- reset
- tokenize
- tokenize_rs
- size
- sql
- tokens
115 class Parser(parser.Parser): 116 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP} 117 VALUES_FOLLOWED_BY_PAREN = False 118 119 FUNCTIONS = { 120 **parser.Parser.FUNCTIONS, 121 "CONVERT": exp.ConvertToCharset.from_arg_list, 122 "L2_DISTANCE": exp.EuclideanDistance.from_arg_list, 123 "NVL": lambda args: build_coalesce(args, is_nvl=True), 124 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), 125 "TO_CHAR": build_timetostr_or_tochar, 126 "TO_TIMESTAMP": _build_to_timestamp, 127 "TO_DATE": build_formatted_time(exp.StrToDate, "oracle"), 128 "TRUNC": lambda args: exp.DateTrunc( 129 unit=seq_get(args, 1) or exp.Literal.string("DD"), 130 this=seq_get(args, 0), 131 unabbreviate=False, 132 ), 133 } 134 FUNCTIONS.pop("TO_BOOLEAN") 135 136 NO_PAREN_FUNCTION_PARSERS = { 137 **parser.Parser.NO_PAREN_FUNCTION_PARSERS, 138 "NEXT": lambda self: self._parse_next_value_for(), 139 "PRIOR": lambda self: self.expression(exp.Prior, this=self._parse_bitwise()), 140 "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True), 141 "DBMS_RANDOM": lambda self: self._parse_dbms_random(), 142 } 143 144 NO_PAREN_FUNCTIONS = { 145 **parser.Parser.NO_PAREN_FUNCTIONS, 146 TokenType.SYSTIMESTAMP: exp.Systimestamp, 147 } 148 149 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { 150 **parser.Parser.FUNCTION_PARSERS, 151 "JSON_ARRAY": lambda self: self._parse_json_array( 152 exp.JSONArray, 153 expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())), 154 ), 155 "JSON_ARRAYAGG": lambda self: self._parse_json_array( 156 exp.JSONArrayAgg, 157 this=self._parse_format_json(self._parse_bitwise()), 158 order=self._parse_order(), 159 ), 160 "JSON_EXISTS": lambda self: self._parse_json_exists(), 161 } 162 FUNCTION_PARSERS.pop("CONVERT") 163 164 PROPERTY_PARSERS = { 165 **parser.Parser.PROPERTY_PARSERS, 166 "GLOBAL": lambda self: self._match_text_seq("TEMPORARY") 167 and self.expression(exp.TemporaryProperty, this="GLOBAL"), 168 "PRIVATE": lambda self: self._match_text_seq("TEMPORARY") 169 and self.expression(exp.TemporaryProperty, this="PRIVATE"), 170 "FORCE": lambda self: self.expression(exp.ForceProperty), 171 } 172 173 QUERY_MODIFIER_PARSERS = { 174 **parser.Parser.QUERY_MODIFIER_PARSERS, 175 TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()), 176 TokenType.WITH: lambda self: ("options", [self._parse_query_restrictions()]), 177 } 178 179 TYPE_LITERAL_PARSERS = { 180 exp.DataType.Type.DATE: lambda self, this, _: self.expression( 181 exp.DateStrToDate, this=this 182 ), 183 # https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/NLS_TIMESTAMP_FORMAT.html 184 exp.DataType.Type.TIMESTAMP: lambda self, this, _: _build_to_timestamp( 185 [this, '"%Y-%m-%d %H:%M:%S.%f"'] 186 ), 187 } 188 189 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT .. 
190 # Reference: https://stackoverflow.com/a/336455 191 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE} 192 193 QUERY_RESTRICTIONS: OPTIONS_TYPE = { 194 "WITH": ( 195 ("READ", "ONLY"), 196 ("CHECK", "OPTION"), 197 ), 198 } 199 200 def _parse_dbms_random(self) -> t.Optional[exp.Expression]: 201 if self._match_text_seq(".", "VALUE"): 202 lower, upper = None, None 203 if self._match(TokenType.L_PAREN, advance=False): 204 lower_upper = self._parse_wrapped_csv(self._parse_bitwise) 205 if len(lower_upper) == 2: 206 lower, upper = lower_upper 207 208 return exp.Rand(lower=lower, upper=upper) 209 210 self._retreat(self._index - 1) 211 return None 212 213 def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E: 214 return self.expression( 215 expr_type, 216 null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"), 217 return_type=self._match_text_seq("RETURNING") and self._parse_type(), 218 strict=self._match_text_seq("STRICT"), 219 **kwargs, 220 ) 221 222 def _parse_hint_function_call(self) -> t.Optional[exp.Expression]: 223 if not self._curr or not self._next or self._next.token_type != TokenType.L_PAREN: 224 return None 225 226 this = self._curr.text 227 228 self._advance(2) 229 args = self._parse_hint_args() 230 this = self.expression(exp.Anonymous, this=this, expressions=args) 231 self._match_r_paren(this) 232 return this 233 234 def _parse_hint_args(self): 235 args = [] 236 result = self._parse_var() 237 238 while result: 239 args.append(result) 240 result = self._parse_var() 241 242 return args 243 244 def _parse_query_restrictions(self) -> t.Optional[exp.Expression]: 245 kind = self._parse_var_from_options(self.QUERY_RESTRICTIONS, raise_unmatched=False) 246 247 if not kind: 248 return None 249 250 return self.expression( 251 exp.QueryOption, 252 this=kind, 253 expression=self._match(TokenType.CONSTRAINT) and self._parse_field(), 254 ) 255 256 def _parse_json_exists(self) -> exp.JSONExists: 257 this = self._parse_format_json(self._parse_bitwise()) 258 self._match(TokenType.COMMA) 259 return self.expression( 260 exp.JSONExists, 261 this=this, 262 path=self.dialect.to_json_path(self._parse_bitwise()), 263 passing=self._match_text_seq("PASSING") 264 and self._parse_csv(lambda: self._parse_alias(self._parse_bitwise())), 265 on_condition=self._parse_on_condition(), 266 ) 267 268 def _parse_into(self) -> t.Optional[exp.Into]: 269 # https://docs.oracle.com/en/database/oracle/oracle-database/19/lnpls/SELECT-INTO-statement.html 270 bulk_collect = self._match(TokenType.BULK_COLLECT_INTO) 271 if not bulk_collect and not self._match(TokenType.INTO): 272 return None 273 274 index = self._index 275 276 expressions = self._parse_expressions() 277 if len(expressions) == 1: 278 self._retreat(index) 279 self._match(TokenType.TABLE) 280 return self.expression( 281 exp.Into, this=self._parse_table(schema=True), bulk_collect=bulk_collect 282 ) 283 284 return self.expression(exp.Into, bulk_collect=bulk_collect, expressions=expressions) 285 286 def _parse_connect_with_prior(self): 287 return self._parse_assignment() 288 289 def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: 290 this = super()._parse_column_ops(this) 291 292 if not this: 293 return this 294 295 index = self._index 296 297 # https://docs.oracle.com/en/database/oracle/oracle-database/26/sqlrf/Interval-Expressions.html 298 interval_span = self._parse_interval_span(this) 299 if isinstance(interval_span.args.get("unit"), exp.IntervalSpan): 300 return interval_span 301 302 
self._retreat(index) 303 return this 304 305 def _parse_insert_table(self) -> t.Optional[exp.Expression]: 306 # Oracle does not use AS for INSERT INTO alias 307 # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/INSERT.html 308 # Parse table parts without schema to avoid parsing the alias with its columns 309 this = self._parse_table_parts(schema=True) 310 311 if isinstance(this, exp.Table): 312 alias_name = self._parse_id_var(any_token=False) 313 if alias_name: 314 this.set("alias", exp.TableAlias(this=alias_name)) 315 316 this.set("partition", self._parse_partition()) 317 318 # Now parse the schema (column list) if present 319 return self._parse_schema(this=this) 320 321 return this
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
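A brief, hedged example of parsing Oracle-specific syntax handled by this subclass (SELECT UNIQUE via DISTINCT_TOKENS and NVL via FUNCTIONS; names are placeholders):

import sqlglot

ast = sqlglot.parse_one("SELECT UNIQUE NVL(a, b) FROM t", read="oracle")

# UNIQUE is accepted as a synonym for DISTINCT, and NVL is parsed as a
# two-argument COALESCE variant that regenerates as NVL for Oracle.
print(ast.sql(dialect="oracle"))  # roughly: SELECT DISTINCT NVL(a, b) FROM t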
Inherited Members
- sqlglot.parser.Parser
- Parser
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- ALTERABLES
- ALIAS_TOKENS
- COLON_PLACEHOLDER_TOKENS
- ARRAY_CONSTRUCTORS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- ASSIGNMENT
- DISJUNCTION
- EQUALITY
- COMPARISON
- BITWISE
- TERM
- FACTOR
- EXPONENT
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- COLUMN_OPERATORS
- CAST_COLUMN_OPERATORS
- EXPRESSION_PARSERS
- STATEMENT_PARSERS
- UNARY_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- RANGE_PARSERS
- PIPE_SYNTAX_TRANSFORM_PARSERS
- CONSTRAINT_PARSERS
- ALTER_PARSERS
- ALTER_ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_TOKENS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_CONVERTERS
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- SCHEMA_BINDING_OPTIONS
- PROCEDURE_OPTIONS
- EXECUTE_AS_OPTIONS
- KEY_CONSTRAINT_OPTIONS
- WINDOW_EXCLUDE_OPTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_PREFIX
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- COPY_INTO_VARLEN_OPTIONS
- IS_JSON_PREDICATE_KIND
- ODBC_DATETIME_LITERALS
- ON_CONDITION_TOKENS
- PRIVILEGE_FOLLOW_TOKENS
- DESCRIBE_STYLES
- SET_ASSIGNMENT_DELIMITERS
- ANALYZE_STYLES
- ANALYZE_EXPRESSION_PARSERS
- PARTITION_KEYWORDS
- AMBIGUOUS_ALIAS_TOKENS
- OPERATION_MODIFIERS
- RECURSIVE_CTE_SEARCH_KIND
- MODIFIABLES
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- IDENTIFY_PIVOT_STRINGS
- LOG_DEFAULTS_TO_LN
- TABLESAMPLE_CSV
- DEFAULT_SAMPLING_METHOD
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_SET_OP
- SET_OP_MODIFIERS
- NO_PAREN_IF_COMMANDS
- JSON_ARROWS_REQUIRE_JSON_TYPE
- COLON_IS_VARIANT_EXTRACT
- SUPPORTS_IMPLICIT_UNNEST
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
- OPTIONAL_ALIAS_TOKEN_CTE
- ALTER_RENAME_REQUIRES_COLUMN
- ALTER_TABLE_PARTITIONS
- JOINS_HAVE_EQUAL_PRECEDENCE
- ZONE_AWARE_TIMESTAMP_CONSTRUCTOR
- MAP_KEYS_ARE_ARBITRARY_EXPRESSIONS
- JSON_EXTRACT_REQUIRES_JSON_EXPRESSION
- ADD_JOIN_ON_TRUE
- SUPPORTS_OMITTED_INTERVAL_SPAN_UNIT
- error_level
- error_message_context
- max_errors
- dialect
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- parse_set_operation
- build_cast
- errors
- sql
323 class Generator(generator.Generator): 324 LOCKING_READS_SUPPORTED = True 325 JOIN_HINTS = False 326 TABLE_HINTS = False 327 DATA_TYPE_SPECIFIERS_ALLOWED = True 328 ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False 329 LIMIT_FETCH = "FETCH" 330 TABLESAMPLE_KEYWORDS = "SAMPLE" 331 LAST_DAY_SUPPORTS_DATE_PART = False 332 SUPPORTS_SELECT_INTO = True 333 TZ_TO_WITH_TIME_ZONE = True 334 SUPPORTS_WINDOW_EXCLUDE = True 335 QUERY_HINT_SEP = " " 336 SUPPORTS_DECODE_CASE = True 337 338 TYPE_MAPPING = { 339 **generator.Generator.TYPE_MAPPING, 340 exp.DataType.Type.TINYINT: "SMALLINT", 341 exp.DataType.Type.SMALLINT: "SMALLINT", 342 exp.DataType.Type.INT: "INT", 343 exp.DataType.Type.BIGINT: "INT", 344 exp.DataType.Type.DECIMAL: "NUMBER", 345 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", 346 exp.DataType.Type.VARCHAR: "VARCHAR2", 347 exp.DataType.Type.NVARCHAR: "NVARCHAR2", 348 exp.DataType.Type.NCHAR: "NCHAR", 349 exp.DataType.Type.TEXT: "CLOB", 350 exp.DataType.Type.TIMETZ: "TIME", 351 exp.DataType.Type.TIMESTAMPNTZ: "TIMESTAMP", 352 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 353 exp.DataType.Type.BINARY: "BLOB", 354 exp.DataType.Type.VARBINARY: "BLOB", 355 exp.DataType.Type.ROWVERSION: "BLOB", 356 } 357 TYPE_MAPPING.pop(exp.DataType.Type.BLOB) 358 359 TRANSFORMS = { 360 **generator.Generator.TRANSFORMS, 361 exp.DateStrToDate: lambda self, e: self.func( 362 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD") 363 ), 364 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.unit), 365 exp.EuclideanDistance: rename_func("L2_DISTANCE"), 366 exp.ILike: no_ilike_sql, 367 exp.LogicalOr: rename_func("MAX"), 368 exp.LogicalAnd: rename_func("MIN"), 369 exp.Mod: rename_func("MOD"), 370 exp.Rand: rename_func("DBMS_RANDOM.VALUE"), 371 exp.Select: transforms.preprocess( 372 [ 373 transforms.eliminate_distinct_on, 374 transforms.eliminate_qualify, 375 ] 376 ), 377 exp.StrPosition: lambda self, e: ( 378 strposition_sql( 379 self, e, func_name="INSTR", supports_position=True, supports_occurrence=True 380 ) 381 ), 382 exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)), 383 exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)), 384 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), 385 exp.Substring: rename_func("SUBSTR"), 386 exp.Table: lambda self, e: self.table_sql(e, sep=" "), 387 exp.TableSample: lambda self, e: self.tablesample_sql(e), 388 exp.TemporaryProperty: lambda _, e: f"{e.name or 'GLOBAL'} TEMPORARY", 389 exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)), 390 exp.ToChar: lambda self, e: self.function_fallback_sql(e), 391 exp.ToNumber: to_number_with_nls_param, 392 exp.Trim: _trim_sql, 393 exp.Unicode: lambda self, e: f"ASCII(UNISTR({self.sql(e.this)}))", 394 exp.UnixToTime: lambda self, 395 e: f"TO_DATE('1970-01-01', 'YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)", 396 exp.UtcTimestamp: rename_func("UTC_TIMESTAMP"), 397 exp.UtcTime: rename_func("UTC_TIME"), 398 exp.Systimestamp: lambda self, e: "SYSTIMESTAMP", 399 } 400 401 PROPERTIES_LOCATION = { 402 **generator.Generator.PROPERTIES_LOCATION, 403 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 404 } 405 406 def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str: 407 if expression.args.get("sysdate"): 408 return "SYSDATE" 409 410 this = expression.this 411 return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP" 412 413 def offset_sql(self, expression: exp.Offset) -> str: 414 return 
f"{super().offset_sql(expression)} ROWS" 415 416 def add_column_sql(self, expression: exp.Expression) -> str: 417 return f"ADD {self.sql(expression)}" 418 419 def queryoption_sql(self, expression: exp.QueryOption) -> str: 420 option = self.sql(expression, "this") 421 value = self.sql(expression, "expression") 422 value = f" CONSTRAINT {value}" if value else "" 423 424 return f"{option}{value}" 425 426 def coalesce_sql(self, expression: exp.Coalesce) -> str: 427 func_name = "NVL" if expression.args.get("is_nvl") else "COALESCE" 428 return rename_func(func_name)(self, expression) 429 430 def into_sql(self, expression: exp.Into) -> str: 431 into = "INTO" if not expression.args.get("bulk_collect") else "BULK COLLECT INTO" 432 if expression.this: 433 return f"{self.seg(into)} {self.sql(expression, 'this')}" 434 435 return f"{self.seg(into)} {self.expressions(expression)}" 436 437 def hint_sql(self, expression: exp.Hint) -> str: 438 expressions = [] 439 440 for expression in expression.expressions: 441 if isinstance(expression, exp.Anonymous): 442 formatted_args = self.format_args(*expression.expressions, sep=" ") 443 expressions.append(f"{self.sql(expression, 'this')}({formatted_args})") 444 else: 445 expressions.append(self.sql(expression)) 446 447 return f" /*+ {self.expressions(sqls=expressions, sep=self.QUERY_HINT_SEP).strip()} */" 448 449 def isascii_sql(self, expression: exp.IsAscii) -> str: 450 return f"NVL(REGEXP_LIKE({self.sql(expression.this)}, '^[' || CHR(1) || '-' || CHR(127) || ']*$'), TRUE)" 451 452 def interval_sql(self, expression: exp.Interval) -> str: 453 return f"{'INTERVAL ' if isinstance(expression.this, exp.Literal) else ''}{self.sql(expression, 'this')} {self.sql(expression, 'unit')}" 454 455 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: 456 param_constraint = expression.find(exp.InOutColumnConstraint) 457 if param_constraint: 458 sep = f" {self.sql(param_constraint)} " 459 param_constraint.pop() 460 return super().columndef_sql(expression, sep)
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are:
  - False (default): Never quote, except in cases where it's mandatory by the dialect.
  - True: Always quote except for special cases.
  - 'safe': Only quote identifiers that are case-insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are:
  - "upper" or True (default): Convert names to uppercase.
  - "lower": Convert names to lowercase.
  - False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
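A hedged transpilation sketch into Oracle, exercising a few of the mappings above (SUBSTRING renamed to SUBSTR, VARCHAR mapped to VARCHAR2); the input is parsed with the default dialect and the output shown is approximate:

import sqlglot

sql = sqlglot.transpile(
    "SELECT SUBSTRING(CAST(x AS VARCHAR(10)), 1, 3) FROM t",
    write="oracle",
)[0]
print(sql)  # roughly: SELECT SUBSTR(CAST(x AS VARCHAR2(10)), 1, 3) FROM t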
437 def hint_sql(self, expression: exp.Hint) -> str: 438 expressions = [] 439 440 for expression in expression.expressions: 441 if isinstance(expression, exp.Anonymous): 442 formatted_args = self.format_args(*expression.expressions, sep=" ") 443 expressions.append(f"{self.sql(expression, 'this')}({formatted_args})") 444 else: 445 expressions.append(self.sql(expression)) 446 447 return f" /*+ {self.expressions(sqls=expressions, sep=self.QUERY_HINT_SEP).strip()} */"
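A hedged round-trip sketch for optimizer hints; the exp.Anonymous branch above renders function-style hints such as FULL(t), and the exact output is illustrative:

import sqlglot

ast = sqlglot.parse_one("SELECT /*+ FULL(t) */ a FROM t", read="oracle")

# Hints are re-emitted in a single /*+ ... */ comment after SELECT.
print(ast.sql(dialect="oracle"))  # roughly: SELECT /*+ FULL(t) */ a FROM t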
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- SINGLE_STRING_INTERVAL
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_ONLY_LITERALS
- RENAME_TABLE_WITH_DB
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINTS
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- EXTRACT_ALLOWS_QUOTES
- NVL2_SUPPORTED
- VALUES_AS_TABLE
- UNNEST_WITH_ORDINALITY
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_WITH_METHOD
- TABLESAMPLE_SEED_KEYWORD
- COLLATE_IS_FUNC
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- JSON_KEY_VALUE_PAIR_SEP
- INSERT_OVERWRITE
- SUPPORTS_UNLOGGED_TABLES
- SUPPORTS_CREATE_TABLE_LIKE
- LIKE_PROPERTY_INSIDE_SCHEMA
- MULTI_ARG_DISTINCT
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- SUPPORTED_JSON_PATH_PARTS
- CAN_IMPLEMENT_ARRAY_ANY
- SUPPORTS_TO_NUMBER
- SET_OP_MODIFIERS
- COPY_PARAMS_ARE_WRAPPED
- COPY_PARAMS_EQ_REQUIRED
- COPY_HAS_INTO_KEYWORD
- UNICODE_SUBSTITUTE
- STAR_EXCEPT
- HEX_FUNC
- WITH_PROPERTIES_PREFIX
- QUOTE_JSON_PATH
- PAD_FILL_PATTERN_IS_REQUIRED
- SUPPORTS_EXPLODING_PROJECTIONS
- ARRAY_CONCAT_IS_VAR_LEN
- SUPPORTS_CONVERT_TIMEZONE
- SUPPORTS_MEDIAN
- SUPPORTS_UNIX_SECONDS
- ALTER_SET_WRAPPED
- NORMALIZE_EXTRACT_DATE_PARTS
- PARSE_JSON_NAME
- ARRAY_SIZE_NAME
- ALTER_SET_TYPE
- ARRAY_SIZE_DIM_REQUIRED
- SUPPORTS_BETWEEN_FLAGS
- SUPPORTS_LIKE_QUANTIFIERS
- MATCH_AGAINST_TABLE_PREFIX
- SET_ASSIGNMENT_REQUIRES_VARIABLE_KEYWORD
- UPDATE_STATEMENT_SUPPORTS_FROM
- UNSUPPORTED_TYPES
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- PARAMETER_TOKEN
- NAMED_PLACEHOLDER_TOKEN
- EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
- RESERVED_KEYWORDS
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS
- SAFE_JSON_PATH_KEY_RE
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- sanitize_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- pseudocolumn_sql
- columnposition_sql
- columnconstraint_sql
- computedcolumnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- inoutcolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- clone_sql
- describe_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- datatype_sql
- directory_sql
- delete_sql
- drop_sql
- set_operation
- set_operations
- fetch_sql
- limitoptions_sql
- filter_sql
- indexparameters_sql
- index_sql
- identifier_sql
- hex_sql
- lowerhex_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- with_properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablefromrows_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- from_sql
- groupingsets_sql
- rollup_sql
- cube_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- lateral_sql
- limit_sql
- setitem_sql
- set_sql
- queryband_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- booland_sql
- boolor_sql
- order_sql
- withfill_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- options_modifier
- for_modifiers
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- unnest_sql
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- formatphrase_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- strtotime_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- alterindex_sql
- alterdiststyle_sql
- altersortkey_sql
- alterrename_sql
- renamecolumn_sql
- alterset_sql
- alter_sql
- altersession_sql
- droppartition_sql
- addconstraint_sql
- addpartition_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- safedivide_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- is_sql
- like_sql
- ilike_sql
- match_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- sub_sql
- trycast_sql
- jsoncast_sql
- try_sql
- log_sql
- use_sql
- binary
- ceil_floor
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- whens_sql
- merge_sql
- tochar_sql
- tonumber_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- duplicatekeyproperty_sql
- uniquekeyproperty_sql
- distributedbyproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- generateembedding_sql
- mltranslate_sql
- mlforecast_sql
- featuresattime_sql
- vectorsearch_sql
- forin_sql
- refresh_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodatetime_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- struct_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- copyparameter_sql
- credentials_sql
- copy_sql
- semicolon_sql
- datadeletionproperty_sql
- maskingpolicycolumnconstraint_sql
- gapfill_sql
- scope_resolution
- scoperesolution_sql
- parsejson_sql
- rand_sql
- changes_sql
- pad_sql
- summarize_sql
- explodinggenerateseries_sql
- arrayconcat_sql
- converttimezone_sql
- json_sql
- jsonvalue_sql
- conditionalinsert_sql
- multitableinserts_sql
- oncondition_sql
- jsonextractquote_sql
- jsonexists_sql
- arrayagg_sql
- slice_sql
- apply_sql
- grant_sql
- revoke_sql
- grantprivilege_sql
- grantprincipal_sql
- columns_sql
- overlay_sql
- todouble_sql
- string_sql
- median_sql
- overflowtruncatebehavior_sql
- unixseconds_sql
- arraysize_sql
- attach_sql
- detach_sql
- attachoption_sql
- watermarkcolumnconstraint_sql
- encodeproperty_sql
- includeproperty_sql
- xmlelement_sql
- xmlkeyvalueoption_sql
- partitionbyrangeproperty_sql
- partitionbyrangepropertydynamic_sql
- unpivotcolumns_sql
- analyzesample_sql
- analyzestatistics_sql
- analyzehistogram_sql
- analyzedelete_sql
- analyzelistchainedrows_sql
- analyzevalidate_sql
- analyze_sql
- xmltable_sql
- xmlnamespace_sql
- export_sql
- declare_sql
- declareitem_sql
- recursivewithsearch_sql
- parameterizedagg_sql
- anonymousaggfunc_sql
- combinedaggfunc_sql
- combinedparameterizedagg_sql
- show_sql
- install_sql
- get_put_sql
- translatecharacters_sql
- decodecase_sql
- semanticview_sql
- getextract_sql
- datefromunixdate_sql
- space_sql
- buildproperty_sql
- refreshtriggerproperty_sql
- modelattribute_sql
- directorystage_sql
- uuid_sql
- initcap_sql
- localtime_sql
- localtimestamp_sql
- weekstart_sql
- chr_sql