sqlglot.dialects.oracle
"""Oracle dialect for sqlglot: tokenizer, parser and generator overrides."""

from __future__ import annotations

import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
    Dialect,
    NormalizationStrategy,
    build_formatted_time,
    build_timetostr_or_tochar,
    build_trunc,
    no_ilike_sql,
    rename_func,
    strposition_sql,
    to_number_with_nls_param,
    trim_sql,
)
from sqlglot.helper import seq_get
from sqlglot.parser import OPTIONS_TYPE, build_coalesce
from sqlglot.tokens import TokenType

if t.TYPE_CHECKING:
    from sqlglot._typing import E


def _trim_sql(self: Oracle.Generator, expression: exp.Trim) -> str:
    """Render TRIM: keep the generator's TRIM syntax for LEADING/TRAILING,
    otherwise delegate to the shared dialect helper."""
    position = expression.args.get("position")

    if position and position.upper() in ("LEADING", "TRAILING"):
        return self.trim_sql(expression)

    return trim_sql(self, expression)


def _build_to_timestamp(args: t.List) -> exp.StrToTime | exp.Anonymous:
    """Build TO_TIMESTAMP(...): a single argument is kept opaque (Anonymous),
    two arguments are parsed as (value, Oracle time format) into StrToTime."""
    if len(args) == 1:
        return exp.Anonymous(this="TO_TIMESTAMP", expressions=args)

    return build_formatted_time(exp.StrToTime, "oracle")(args)


class Oracle(Dialect):
    """Dialect definition for Oracle Database."""

    ALIAS_POST_TABLESAMPLE = True
    LOCKING_READS_SUPPORTED = True
    # SAMPLE (n) takes a percentage, not a row count
    TABLESAMPLE_SIZE_IS_PERCENT = True
    NULL_ORDERING = "nulls_are_large"
    ON_CONDITION_EMPTY_BEFORE_ERROR = False
    ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False
    # Oracle does not support referencing SELECT aliases in other projections/WHERE
    DISABLES_ALIAS_REF_EXPANSION = True

    # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE

    # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
    TIME_MAPPING = {
        "D": "%u",  # Day of week (1-7)
        "DAY": "%A",  # name of day
        "DD": "%d",  # day of month (1-31)
        "DDD": "%j",  # day of year (1-366)
        "DY": "%a",  # abbreviated name of day
        "HH": "%I",  # Hour of day (1-12)
        "HH12": "%I",  # alias for HH
        "HH24": "%H",  # Hour of day (0-23)
        "IW": "%V",  # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
        "MI": "%M",  # Minute (0-59)
        "MM": "%m",  # Month (01-12; January = 01)
        "MON": "%b",  # Abbreviated name of month
        "MONTH": "%B",  # Name of month
        "SS": "%S",  # Second (0-59)
        "WW": "%W",  # Week of year (1-53)
        "YY": "%y",  # 15
        "YYYY": "%Y",  # 2015
        "FF6": "%f",  # only 6 digits are supported in python formats
    }

    # Engine-generated columns that may appear in queries without being declared
    PSEUDOCOLUMNS = {"ROWNUM", "ROWID", "OBJECT_ID", "OBJECT_VALUE", "LEVEL"}

    def can_quote(self, identifier: exp.Identifier, identify: str | bool = "safe") -> bool:
        # Disable quoting for pseudocolumns as it may break queries e.g
        # `WHERE "ROWNUM" = ...` does not work but `WHERE ROWNUM = ...` does
        return (
            identifier.quoted or not isinstance(identifier.parent, exp.Pseudocolumn)
        ) and super().can_quote(identifier, identify=identify)

    class Tokenizer(tokens.Tokenizer):
        VAR_SINGLE_TOKENS = {"@", "$", "#"}

        # Oracle unicode string literals: U'...' / u'...' for each quote style
        UNICODE_STRINGS = [
            (prefix + q, q)
            for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES)
            for prefix in ("U", "u")
        ]

        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "(+)": TokenType.JOIN_MARKER,
            "BINARY_DOUBLE": TokenType.DOUBLE,
            "BINARY_FLOAT": TokenType.FLOAT,
            "BULK COLLECT INTO": TokenType.BULK_COLLECT_INTO,
            "COLUMNS": TokenType.COLUMN,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NVARCHAR2": TokenType.NVARCHAR,
            "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "START": TokenType.BEGIN,
            "TOP": TokenType.TOP,
            "VARCHAR2": TokenType.VARCHAR,
            "SYSTIMESTAMP": TokenType.SYSTIMESTAMP,
        }

    class Parser(parser.Parser):
        WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
        VALUES_FOLLOWED_BY_PAREN = False

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "CONVERT": exp.ConvertToCharset.from_arg_list,
            "L2_DISTANCE": exp.EuclideanDistance.from_arg_list,
            "NVL": lambda args: build_coalesce(args, is_nvl=True),
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TO_CHAR": build_timetostr_or_tochar,
            "TO_TIMESTAMP": _build_to_timestamp,
            "TO_DATE": build_formatted_time(exp.StrToDate, "oracle"),
            "TRUNC": lambda args, dialect: build_trunc(
                args, dialect, date_trunc_unabbreviate=False, default_date_trunc_unit="DD"
            ),
        }
        # Oracle has no TO_BOOLEAN function
        FUNCTIONS.pop("TO_BOOLEAN")

        NO_PAREN_FUNCTION_PARSERS = {
            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
            "NEXT": lambda self: self._parse_next_value_for(),
            "PRIOR": lambda self: self.expression(exp.Prior, this=self._parse_bitwise()),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
            "DBMS_RANDOM": lambda self: self._parse_dbms_random(),
        }

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.SYSTIMESTAMP: exp.Systimestamp,
        }

        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
            **parser.Parser.FUNCTION_PARSERS,
            "JSON_ARRAY": lambda self: self._parse_json_array(
                exp.JSONArray,
                expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())),
            ),
            "JSON_ARRAYAGG": lambda self: self._parse_json_array(
                exp.JSONArrayAgg,
                this=self._parse_format_json(self._parse_bitwise()),
                order=self._parse_order(),
            ),
            "JSON_EXISTS": lambda self: self._parse_json_exists(),
        }
        # CONVERT is handled via FUNCTIONS (ConvertToCharset) instead
        FUNCTION_PARSERS.pop("CONVERT")

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "GLOBAL": lambda self: self._match_text_seq("TEMPORARY")
            and self.expression(exp.TemporaryProperty, this="GLOBAL"),
            "PRIVATE": lambda self: self._match_text_seq("TEMPORARY")
            and self.expression(exp.TemporaryProperty, this="PRIVATE"),
            "FORCE": lambda self: self.expression(exp.ForceProperty),
        }

        QUERY_MODIFIER_PARSERS = {
            **parser.Parser.QUERY_MODIFIER_PARSERS,
            TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()),
            TokenType.WITH: lambda self: ("options", [self._parse_query_restrictions()]),
        }

        # Replaces (does not extend) the base TYPE_LITERAL_PARSERS mapping
        TYPE_LITERAL_PARSERS = {
            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
                exp.DateStrToDate, this=this
            ),
            # https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/NLS_TIMESTAMP_FORMAT.html
            exp.DataType.Type.TIMESTAMP: lambda self, this, _: _build_to_timestamp(
                [this, '"%Y-%m-%d %H:%M:%S.%f"']
            ),
        }

        # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..
        # Reference: https://stackoverflow.com/a/336455
        DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}

        QUERY_RESTRICTIONS: OPTIONS_TYPE = {
            "WITH": (
                ("READ", "ONLY"),
                ("CHECK", "OPTION"),
            ),
        }

        def _parse_dbms_random(self) -> t.Optional[exp.Expression]:
            """Parse DBMS_RANDOM.VALUE[(lower, upper)] into Rand; retreat otherwise."""
            if self._match_text_seq(".", "VALUE"):
                lower, upper = None, None
                if self._match(TokenType.L_PAREN, advance=False):
                    lower_upper = self._parse_wrapped_csv(self._parse_bitwise)
                    if len(lower_upper) == 2:
                        lower, upper = lower_upper

                return exp.Rand(lower=lower, upper=upper)

            # Not DBMS_RANDOM.VALUE: back out so the token can be parsed normally
            self._retreat(self._index - 1)
            return None

        def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:
            """Parse the shared trailing clauses of JSON_ARRAY / JSON_ARRAYAGG."""
            return self.expression(
                expr_type,
                null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"),
                return_type=self._match_text_seq("RETURNING") and self._parse_type(),
                strict=self._match_text_seq("STRICT"),
                **kwargs,
            )

        def _parse_hint_function_call(self) -> t.Optional[exp.Expression]:
            """Parse a function-style optimizer hint, e.g. INDEX(t idx), as Anonymous."""
            if not self._curr or not self._next or self._next.token_type != TokenType.L_PAREN:
                return None

            this = self._curr.text

            self._advance(2)  # skip the hint name and the opening paren
            args = self._parse_hint_args()
            this = self.expression(exp.Anonymous, this=this, expressions=args)
            self._match_r_paren(this)
            return this

        def _parse_hint_args(self) -> t.List[exp.Expression]:
            """Collect the space-separated var arguments inside a hint's parens."""
            args = []
            result = self._parse_var()

            while result:
                args.append(result)
                result = self._parse_var()

            return args

        def _parse_query_restrictions(self) -> t.Optional[exp.Expression]:
            """Parse WITH READ ONLY / WITH CHECK OPTION [CONSTRAINT name]."""
            kind = self._parse_var_from_options(self.QUERY_RESTRICTIONS, raise_unmatched=False)

            if not kind:
                return None

            return self.expression(
                exp.QueryOption,
                this=kind,
                expression=self._match(TokenType.CONSTRAINT) and self._parse_field(),
            )

        def _parse_json_exists(self) -> exp.JSONExists:
            """Parse JSON_EXISTS(doc, path [PASSING ...] [ON ERROR/EMPTY ...])."""
            this = self._parse_format_json(self._parse_bitwise())
            self._match(TokenType.COMMA)
            return self.expression(
                exp.JSONExists,
                this=this,
                path=self.dialect.to_json_path(self._parse_bitwise()),
                passing=self._match_text_seq("PASSING")
                and self._parse_csv(lambda: self._parse_alias(self._parse_bitwise())),
                on_condition=self._parse_on_condition(),
            )

        def _parse_into(self) -> t.Optional[exp.Into]:
            # https://docs.oracle.com/en/database/oracle/oracle-database/19/lnpls/SELECT-INTO-statement.html
            bulk_collect = self._match(TokenType.BULK_COLLECT_INTO)
            if not bulk_collect and not self._match(TokenType.INTO):
                return None

            index = self._index

            expressions = self._parse_expressions()
            if len(expressions) == 1:
                # A single target may be a table reference; re-parse it as one
                self._retreat(index)
                self._match(TokenType.TABLE)
                return self.expression(
                    exp.Into, this=self._parse_table(schema=True), bulk_collect=bulk_collect
                )

            return self.expression(exp.Into, bulk_collect=bulk_collect, expressions=expressions)

        def _parse_connect_with_prior(self) -> t.Optional[exp.Expression]:
            """CONNECT BY conditions may contain PRIOR; parse as a full assignment."""
            return self._parse_assignment()

        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            this = super()._parse_column_ops(this)

            if not this:
                return this

            index = self._index

            # https://docs.oracle.com/en/database/oracle/oracle-database/26/sqlrf/Interval-Expressions.html
            interval_span = self._try_parse(lambda: self._parse_interval_span(this))
            if interval_span and isinstance(interval_span.args.get("unit"), exp.IntervalSpan):
                return interval_span

            self._retreat(index)
            return this

        def _parse_insert_table(self) -> t.Optional[exp.Expression]:
            # Oracle does not use AS for INSERT INTO alias
            # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/INSERT.html
            # Parse table parts without schema to avoid parsing the alias with its columns
            this = self._parse_table_parts(schema=True)

            if isinstance(this, exp.Table):
                alias_name = self._parse_id_var(any_token=False)
                if alias_name:
                    this.set("alias", exp.TableAlias(this=alias_name))

                this.set("partition", self._parse_partition())

                # Now parse the schema (column list) if present
                return self._parse_schema(this=this)

            return this

    class Generator(generator.Generator):
        LOCKING_READS_SUPPORTED = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        DATA_TYPE_SPECIFIERS_ALLOWED = True
        ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False
        LIMIT_FETCH = "FETCH"
        TABLESAMPLE_KEYWORDS = "SAMPLE"
        LAST_DAY_SUPPORTS_DATE_PART = False
        SUPPORTS_SELECT_INTO = True
        TZ_TO_WITH_TIME_ZONE = True
        SUPPORTS_WINDOW_EXCLUDE = True
        QUERY_HINT_SEP = " "
        SUPPORTS_DECODE_CASE = True

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TINYINT: "SMALLINT",
            exp.DataType.Type.SMALLINT: "SMALLINT",
            exp.DataType.Type.INT: "INT",
            exp.DataType.Type.BIGINT: "INT",
            exp.DataType.Type.DECIMAL: "NUMBER",
            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
            exp.DataType.Type.VARCHAR: "VARCHAR2",
            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
            exp.DataType.Type.NCHAR: "NCHAR",
            exp.DataType.Type.TEXT: "CLOB",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPNTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.BINARY: "BLOB",
            exp.DataType.Type.VARBINARY: "BLOB",
            exp.DataType.Type.ROWVERSION: "BLOB",
        }
        # BLOB falls through to the default rendering
        TYPE_MAPPING.pop(exp.DataType.Type.BLOB)

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.DateStrToDate: lambda self, e: self.func(
                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
            ),
            exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.unit),
            exp.EuclideanDistance: rename_func("L2_DISTANCE"),
            exp.ILike: no_ilike_sql,
            exp.LogicalOr: rename_func("MAX"),
            exp.LogicalAnd: rename_func("MIN"),
            exp.Mod: rename_func("MOD"),
            exp.Rand: rename_func("DBMS_RANDOM.VALUE"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_qualify,
                ]
            ),
            exp.StrPosition: lambda self, e: (
                strposition_sql(
                    self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
                )
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
            exp.Substring: rename_func("SUBSTR"),
            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
            exp.TableSample: lambda self, e: self.tablesample_sql(e),
            exp.TemporaryProperty: lambda _, e: f"{e.name or 'GLOBAL'} TEMPORARY",
            exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToNumber: to_number_with_nls_param,
            exp.Trim: _trim_sql,
            exp.Unicode: lambda self, e: f"ASCII(UNISTR({self.sql(e.this)}))",
            exp.UnixToTime: lambda self,
            e: f"TO_DATE('1970-01-01', 'YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
            exp.UtcTimestamp: rename_func("UTC_TIMESTAMP"),
            exp.UtcTime: rename_func("UTC_TIME"),
            exp.Systimestamp: lambda self, e: "SYSTIMESTAMP",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str:
            """Render SYSDATE when flagged by the parser, else CURRENT_TIMESTAMP."""
            if expression.args.get("sysdate"):
                return "SYSDATE"

            this = expression.this
            return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP"

        def offset_sql(self, expression: exp.Offset) -> str:
            # Oracle requires the ROWS keyword after OFFSET n
            return f"{super().offset_sql(expression)} ROWS"

        def add_column_sql(self, expression: exp.Expression) -> str:
            return f"ADD {self.sql(expression)}"

        def queryoption_sql(self, expression: exp.QueryOption) -> str:
            """Render WITH READ ONLY / WITH CHECK OPTION [CONSTRAINT name]."""
            option = self.sql(expression, "this")
            value = self.sql(expression, "expression")
            value = f" CONSTRAINT {value}" if value else ""

            return f"{option}{value}"

        def coalesce_sql(self, expression: exp.Coalesce) -> str:
            # Round-trip NVL(...) as NVL rather than normalizing to COALESCE
            func_name = "NVL" if expression.args.get("is_nvl") else "COALESCE"
            return rename_func(func_name)(self, expression)

        def into_sql(self, expression: exp.Into) -> str:
            into = "INTO" if not expression.args.get("bulk_collect") else "BULK COLLECT INTO"
            if expression.this:
                return f"{self.seg(into)} {self.sql(expression, 'this')}"

            return f"{self.seg(into)} {self.expressions(expression)}"

        def hint_sql(self, expression: exp.Hint) -> str:
            """Render /*+ ... */ hints; function-style hints use space-separated args."""
            expressions = []

            # NOTE(review): the loop variable shadows the `expression` parameter;
            # works because the parameter is not used after this point
            for expression in expression.expressions:
                if isinstance(expression, exp.Anonymous):
                    formatted_args = self.format_args(*expression.expressions, sep=" ")
                    expressions.append(f"{self.sql(expression, 'this')}({formatted_args})")
                else:
                    expressions.append(self.sql(expression))

            return f" /*+ {self.expressions(sqls=expressions, sep=self.QUERY_HINT_SEP).strip()} */"

        def isascii_sql(self, expression: exp.IsAscii) -> str:
            return f"NVL(REGEXP_LIKE({self.sql(expression.this)}, '^[' || CHR(1) || '-' || CHR(127) || ']*$'), TRUE)"

        def interval_sql(self, expression: exp.Interval) -> str:
            # Only literal intervals take the INTERVAL keyword
            return f"{'INTERVAL ' if isinstance(expression.this, exp.Literal) else ''}{self.sql(expression, 'this')} {self.sql(expression, 'unit')}"

        def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
            """Place an IN/OUT parameter constraint between the name and the type."""
            param_constraint = expression.find(exp.InOutColumnConstraint)
            if param_constraint:
                sep = f" {self.sql(param_constraint)} "
                param_constraint.pop()
            return super().columndef_sql(expression, sep)
class Oracle(Dialect):
    """Dialect definition for Oracle Database."""

    ALIAS_POST_TABLESAMPLE = True
    LOCKING_READS_SUPPORTED = True
    # SAMPLE (n) takes a percentage, not a row count
    TABLESAMPLE_SIZE_IS_PERCENT = True
    NULL_ORDERING = "nulls_are_large"
    ON_CONDITION_EMPTY_BEFORE_ERROR = False
    ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False
    # Oracle does not support referencing SELECT aliases in other projections/WHERE
    DISABLES_ALIAS_REF_EXPANSION = True

    # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE

    # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
    TIME_MAPPING = {
        "D": "%u",  # Day of week (1-7)
        "DAY": "%A",  # name of day
        "DD": "%d",  # day of month (1-31)
        "DDD": "%j",  # day of year (1-366)
        "DY": "%a",  # abbreviated name of day
        "HH": "%I",  # Hour of day (1-12)
        "HH12": "%I",  # alias for HH
        "HH24": "%H",  # Hour of day (0-23)
        "IW": "%V",  # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
        "MI": "%M",  # Minute (0-59)
        "MM": "%m",  # Month (01-12; January = 01)
        "MON": "%b",  # Abbreviated name of month
        "MONTH": "%B",  # Name of month
        "SS": "%S",  # Second (0-59)
        "WW": "%W",  # Week of year (1-53)
        "YY": "%y",  # 15
        "YYYY": "%Y",  # 2015
        "FF6": "%f",  # only 6 digits are supported in python formats
    }

    # Engine-generated columns that may appear in queries without being declared
    PSEUDOCOLUMNS = {"ROWNUM", "ROWID", "OBJECT_ID", "OBJECT_VALUE", "LEVEL"}

    def can_quote(self, identifier: exp.Identifier, identify: str | bool = "safe") -> bool:
        # Disable quoting for pseudocolumns as it may break queries e.g
        # `WHERE "ROWNUM" = ...` does not work but `WHERE ROWNUM = ...` does
        return (
            identifier.quoted or not isinstance(identifier.parent, exp.Pseudocolumn)
        ) and super().can_quote(identifier, identify=identify)

    class Tokenizer(tokens.Tokenizer):
        VAR_SINGLE_TOKENS = {"@", "$", "#"}

        # Oracle unicode string literals: U'...' / u'...' for each quote style
        UNICODE_STRINGS = [
            (prefix + q, q)
            for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES)
            for prefix in ("U", "u")
        ]

        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "(+)": TokenType.JOIN_MARKER,
            "BINARY_DOUBLE": TokenType.DOUBLE,
            "BINARY_FLOAT": TokenType.FLOAT,
            "BULK COLLECT INTO": TokenType.BULK_COLLECT_INTO,
            "COLUMNS": TokenType.COLUMN,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NVARCHAR2": TokenType.NVARCHAR,
            "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "START": TokenType.BEGIN,
            "TOP": TokenType.TOP,
            "VARCHAR2": TokenType.VARCHAR,
            "SYSTIMESTAMP": TokenType.SYSTIMESTAMP,
        }

    class Parser(parser.Parser):
        WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
        VALUES_FOLLOWED_BY_PAREN = False

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "CONVERT": exp.ConvertToCharset.from_arg_list,
            "L2_DISTANCE": exp.EuclideanDistance.from_arg_list,
            "NVL": lambda args: build_coalesce(args, is_nvl=True),
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TO_CHAR": build_timetostr_or_tochar,
            "TO_TIMESTAMP": _build_to_timestamp,
            "TO_DATE": build_formatted_time(exp.StrToDate, "oracle"),
            "TRUNC": lambda args, dialect: build_trunc(
                args, dialect, date_trunc_unabbreviate=False, default_date_trunc_unit="DD"
            ),
        }
        # Oracle has no TO_BOOLEAN function
        FUNCTIONS.pop("TO_BOOLEAN")

        NO_PAREN_FUNCTION_PARSERS = {
            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
            "NEXT": lambda self: self._parse_next_value_for(),
            "PRIOR": lambda self: self.expression(exp.Prior, this=self._parse_bitwise()),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
            "DBMS_RANDOM": lambda self: self._parse_dbms_random(),
        }

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.SYSTIMESTAMP: exp.Systimestamp,
        }

        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
            **parser.Parser.FUNCTION_PARSERS,
            "JSON_ARRAY": lambda self: self._parse_json_array(
                exp.JSONArray,
                expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())),
            ),
            "JSON_ARRAYAGG": lambda self: self._parse_json_array(
                exp.JSONArrayAgg,
                this=self._parse_format_json(self._parse_bitwise()),
                order=self._parse_order(),
            ),
            "JSON_EXISTS": lambda self: self._parse_json_exists(),
        }
        # CONVERT is handled via FUNCTIONS (ConvertToCharset) instead
        FUNCTION_PARSERS.pop("CONVERT")

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "GLOBAL": lambda self: self._match_text_seq("TEMPORARY")
            and self.expression(exp.TemporaryProperty, this="GLOBAL"),
            "PRIVATE": lambda self: self._match_text_seq("TEMPORARY")
            and self.expression(exp.TemporaryProperty, this="PRIVATE"),
            "FORCE": lambda self: self.expression(exp.ForceProperty),
        }

        QUERY_MODIFIER_PARSERS = {
            **parser.Parser.QUERY_MODIFIER_PARSERS,
            TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()),
            TokenType.WITH: lambda self: ("options", [self._parse_query_restrictions()]),
        }

        # Replaces (does not extend) the base TYPE_LITERAL_PARSERS mapping
        TYPE_LITERAL_PARSERS = {
            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
                exp.DateStrToDate, this=this
            ),
            # https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/NLS_TIMESTAMP_FORMAT.html
            exp.DataType.Type.TIMESTAMP: lambda self, this, _: _build_to_timestamp(
                [this, '"%Y-%m-%d %H:%M:%S.%f"']
            ),
        }

        # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..
        # Reference: https://stackoverflow.com/a/336455
        DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}

        QUERY_RESTRICTIONS: OPTIONS_TYPE = {
            "WITH": (
                ("READ", "ONLY"),
                ("CHECK", "OPTION"),
            ),
        }

        def _parse_dbms_random(self) -> t.Optional[exp.Expression]:
            """Parse DBMS_RANDOM.VALUE[(lower, upper)] into Rand; retreat otherwise."""
            if self._match_text_seq(".", "VALUE"):
                lower, upper = None, None
                if self._match(TokenType.L_PAREN, advance=False):
                    lower_upper = self._parse_wrapped_csv(self._parse_bitwise)
                    if len(lower_upper) == 2:
                        lower, upper = lower_upper

                return exp.Rand(lower=lower, upper=upper)

            # Not DBMS_RANDOM.VALUE: back out so the token can be parsed normally
            self._retreat(self._index - 1)
            return None

        def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:
            """Parse the shared trailing clauses of JSON_ARRAY / JSON_ARRAYAGG."""
            return self.expression(
                expr_type,
                null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"),
                return_type=self._match_text_seq("RETURNING") and self._parse_type(),
                strict=self._match_text_seq("STRICT"),
                **kwargs,
            )

        def _parse_hint_function_call(self) -> t.Optional[exp.Expression]:
            """Parse a function-style optimizer hint, e.g. INDEX(t idx), as Anonymous."""
            if not self._curr or not self._next or self._next.token_type != TokenType.L_PAREN:
                return None

            this = self._curr.text

            self._advance(2)  # skip the hint name and the opening paren
            args = self._parse_hint_args()
            this = self.expression(exp.Anonymous, this=this, expressions=args)
            self._match_r_paren(this)
            return this

        def _parse_hint_args(self) -> t.List[exp.Expression]:
            """Collect the space-separated var arguments inside a hint's parens."""
            args = []
            result = self._parse_var()

            while result:
                args.append(result)
                result = self._parse_var()

            return args

        def _parse_query_restrictions(self) -> t.Optional[exp.Expression]:
            """Parse WITH READ ONLY / WITH CHECK OPTION [CONSTRAINT name]."""
            kind = self._parse_var_from_options(self.QUERY_RESTRICTIONS, raise_unmatched=False)

            if not kind:
                return None

            return self.expression(
                exp.QueryOption,
                this=kind,
                expression=self._match(TokenType.CONSTRAINT) and self._parse_field(),
            )

        def _parse_json_exists(self) -> exp.JSONExists:
            """Parse JSON_EXISTS(doc, path [PASSING ...] [ON ERROR/EMPTY ...])."""
            this = self._parse_format_json(self._parse_bitwise())
            self._match(TokenType.COMMA)
            return self.expression(
                exp.JSONExists,
                this=this,
                path=self.dialect.to_json_path(self._parse_bitwise()),
                passing=self._match_text_seq("PASSING")
                and self._parse_csv(lambda: self._parse_alias(self._parse_bitwise())),
                on_condition=self._parse_on_condition(),
            )

        def _parse_into(self) -> t.Optional[exp.Into]:
            # https://docs.oracle.com/en/database/oracle/oracle-database/19/lnpls/SELECT-INTO-statement.html
            bulk_collect = self._match(TokenType.BULK_COLLECT_INTO)
            if not bulk_collect and not self._match(TokenType.INTO):
                return None

            index = self._index

            expressions = self._parse_expressions()
            if len(expressions) == 1:
                # A single target may be a table reference; re-parse it as one
                self._retreat(index)
                self._match(TokenType.TABLE)
                return self.expression(
                    exp.Into, this=self._parse_table(schema=True), bulk_collect=bulk_collect
                )

            return self.expression(exp.Into, bulk_collect=bulk_collect, expressions=expressions)

        def _parse_connect_with_prior(self) -> t.Optional[exp.Expression]:
            """CONNECT BY conditions may contain PRIOR; parse as a full assignment."""
            return self._parse_assignment()

        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            this = super()._parse_column_ops(this)

            if not this:
                return this

            index = self._index

            # https://docs.oracle.com/en/database/oracle/oracle-database/26/sqlrf/Interval-Expressions.html
            interval_span = self._try_parse(lambda: self._parse_interval_span(this))
            if interval_span and isinstance(interval_span.args.get("unit"), exp.IntervalSpan):
                return interval_span

            self._retreat(index)
            return this

        def _parse_insert_table(self) -> t.Optional[exp.Expression]:
            # Oracle does not use AS for INSERT INTO alias
            # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/INSERT.html
            # Parse table parts without schema to avoid parsing the alias with its columns
            this = self._parse_table_parts(schema=True)

            if isinstance(this, exp.Table):
                alias_name = self._parse_id_var(any_token=False)
                if alias_name:
                    this.set("alias", exp.TableAlias(this=alias_name))

                this.set("partition", self._parse_partition())

                # Now parse the schema (column list) if present
                return self._parse_schema(this=this)

            return this

    class Generator(generator.Generator):
        LOCKING_READS_SUPPORTED = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        DATA_TYPE_SPECIFIERS_ALLOWED = True
        ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False
        LIMIT_FETCH = "FETCH"
        TABLESAMPLE_KEYWORDS = "SAMPLE"
        LAST_DAY_SUPPORTS_DATE_PART = False
        SUPPORTS_SELECT_INTO = True
        TZ_TO_WITH_TIME_ZONE = True
        SUPPORTS_WINDOW_EXCLUDE = True
        QUERY_HINT_SEP = " "
        SUPPORTS_DECODE_CASE = True

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TINYINT: "SMALLINT",
            exp.DataType.Type.SMALLINT: "SMALLINT",
            exp.DataType.Type.INT: "INT",
            exp.DataType.Type.BIGINT: "INT",
            exp.DataType.Type.DECIMAL: "NUMBER",
            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
            exp.DataType.Type.VARCHAR: "VARCHAR2",
            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
            exp.DataType.Type.NCHAR: "NCHAR",
            exp.DataType.Type.TEXT: "CLOB",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPNTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.BINARY: "BLOB",
            exp.DataType.Type.VARBINARY: "BLOB",
            exp.DataType.Type.ROWVERSION: "BLOB",
        }
        # BLOB falls through to the default rendering
        TYPE_MAPPING.pop(exp.DataType.Type.BLOB)

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.DateStrToDate: lambda self, e: self.func(
                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
            ),
            exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.unit),
            exp.EuclideanDistance: rename_func("L2_DISTANCE"),
            exp.ILike: no_ilike_sql,
            exp.LogicalOr: rename_func("MAX"),
            exp.LogicalAnd: rename_func("MIN"),
            exp.Mod: rename_func("MOD"),
            exp.Rand: rename_func("DBMS_RANDOM.VALUE"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_qualify,
                ]
            ),
            exp.StrPosition: lambda self, e: (
                strposition_sql(
                    self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
                )
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
            exp.Substring: rename_func("SUBSTR"),
            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
            exp.TableSample: lambda self, e: self.tablesample_sql(e),
            exp.TemporaryProperty: lambda _, e: f"{e.name or 'GLOBAL'} TEMPORARY",
            exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToNumber: to_number_with_nls_param,
            exp.Trim: _trim_sql,
            exp.Unicode: lambda self, e: f"ASCII(UNISTR({self.sql(e.this)}))",
            exp.UnixToTime: lambda self,
            e: f"TO_DATE('1970-01-01', 'YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
            exp.UtcTimestamp: rename_func("UTC_TIMESTAMP"),
            exp.UtcTime: rename_func("UTC_TIME"),
            exp.Systimestamp: lambda self, e: "SYSTIMESTAMP",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str:
            """Render SYSDATE when flagged by the parser, else CURRENT_TIMESTAMP."""
            if expression.args.get("sysdate"):
                return "SYSDATE"

            this = expression.this
            return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP"

        def offset_sql(self, expression: exp.Offset) -> str:
            # Oracle requires the ROWS keyword after OFFSET n
            return f"{super().offset_sql(expression)} ROWS"

        def add_column_sql(self, expression: exp.Expression) -> str:
            return f"ADD {self.sql(expression)}"

        def queryoption_sql(self, expression: exp.QueryOption) -> str:
            """Render WITH READ ONLY / WITH CHECK OPTION [CONSTRAINT name]."""
            option = self.sql(expression, "this")
            value = self.sql(expression, "expression")
            value = f" CONSTRAINT {value}" if value else ""

            return f"{option}{value}"

        def coalesce_sql(self, expression: exp.Coalesce) -> str:
            # Round-trip NVL(...) as NVL rather than normalizing to COALESCE
            func_name = "NVL" if expression.args.get("is_nvl") else "COALESCE"
            return rename_func(func_name)(self, expression)

        def into_sql(self, expression: exp.Into) -> str:
            into = "INTO" if not expression.args.get("bulk_collect") else "BULK COLLECT INTO"
            if expression.this:
                return f"{self.seg(into)} {self.sql(expression, 'this')}"

            return f"{self.seg(into)} {self.expressions(expression)}"

        def hint_sql(self, expression: exp.Hint) -> str:
            """Render /*+ ... */ hints; function-style hints use space-separated args."""
            expressions = []

            # NOTE(review): the loop variable shadows the `expression` parameter;
            # works because the parameter is not used after this point
            for expression in expression.expressions:
                if isinstance(expression, exp.Anonymous):
                    formatted_args = self.format_args(*expression.expressions, sep=" ")
                    expressions.append(f"{self.sql(expression, 'this')}({formatted_args})")
                else:
                    expressions.append(self.sql(expression))

            return f" /*+ {self.expressions(sqls=expressions, sep=self.QUERY_HINT_SEP).strip()} */"

        def isascii_sql(self, expression: exp.IsAscii) -> str:
            return f"NVL(REGEXP_LIKE({self.sql(expression.this)}, '^[' || CHR(1) || '-' || CHR(127) || ']*$'), TRUE)"

        def interval_sql(self, expression: exp.Interval) -> str:
            # Only literal intervals take the INTERVAL keyword
            return f"{'INTERVAL ' if isinstance(expression.this, exp.Literal) else ''}{self.sql(expression, 'this')} {self.sql(expression, 'unit')}"

        def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
            """Place an IN/OUT parameter constraint between the name and the type."""
            param_constraint = expression.find(exp.InOutColumnConstraint)
            if param_constraint:
                sep = f" {self.sql(param_constraint)} "
                param_constraint.pop()
            return super().columndef_sql(expression, sep)
Default NULL ordering method to use if not explicitly set.
Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"
Whether "X ON EMPTY" should come before "X ON ERROR" (for dialects like T-SQL, MySQL, Oracle).
Whether alias reference expansion is disabled for this dialect.
Some dialects like Oracle do NOT support referencing aliases in projections or WHERE clauses. The original expression must be repeated instead.
For example, in Oracle: SELECT y.foo AS bar, bar * 2 AS baz FROM y -- INVALID; SELECT y.foo AS bar, y.foo * 2 AS baz FROM y -- VALID
Specifies the strategy according to which identifiers should be normalized.
Associates this dialect's time formats with their equivalent Python strftime formats.
Columns that are auto-generated by the engine corresponding to this dialect.
For example, such columns may be excluded from SELECT * queries.
80 def can_quote(self, identifier: exp.Identifier, identify: str | bool = "safe") -> bool: 81 # Disable quoting for pseudocolumns as it may break queries e.g 82 # `WHERE "ROWNUM" = ...` does not work but `WHERE ROWNUM = ...` does 83 return ( 84 identifier.quoted or not isinstance(identifier.parent, exp.Pseudocolumn) 85 ) and super().can_quote(identifier, identify=identify)
Checks if an identifier can be quoted
Arguments:
- identifier: The identifier to check.
- identify:
True: Always returns True except for certain cases. "safe": Only returns True if the identifier is case-insensitive. "unsafe": Only returns True if the identifier is case-sensitive.
Returns:
Whether the given text can be identified.
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer overrides for Oracle-specific lexical syntax."""

        # Characters that may appear inside unquoted identifiers (e.g. V$SESSION, SYS#).
        VAR_SINGLE_TOKENS = {"@", "$", "#"}

        # Oracle unicode string literals: a U/u prefix in front of every quote style.
        UNICODE_STRINGS = [
            (prefix + q, q)
            for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES)
            for prefix in ("U", "u")
        ]

        # Oracle block comments do not nest.
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "(+)": TokenType.JOIN_MARKER,  # old-style Oracle outer-join marker
            "BINARY_DOUBLE": TokenType.DOUBLE,
            "BINARY_FLOAT": TokenType.FLOAT,
            "BULK COLLECT INTO": TokenType.BULK_COLLECT_INTO,
            "COLUMNS": TokenType.COLUMN,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,  # Oracle's spelling of EXCEPT
            "NVARCHAR2": TokenType.NVARCHAR,
            "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "START": TokenType.BEGIN,
            "TOP": TokenType.TOP,
            "VARCHAR2": TokenType.VARCHAR,
            "SYSTIMESTAMP": TokenType.SYSTIMESTAMP,
        }
Inherited Members
- sqlglot.tokens.Tokenizer
- Tokenizer
- SINGLE_TOKENS
- BIT_STRINGS
- BYTE_STRINGS
- HEX_STRINGS
- RAW_STRINGS
- HEREDOC_STRINGS
- IDENTIFIERS
- QUOTES
- STRING_ESCAPES
- ESCAPE_FOLLOW_CHARS
- IDENTIFIER_ESCAPES
- HEREDOC_TAG_IS_IDENTIFIER
- HEREDOC_STRING_ALTERNATIVE
- STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS
- HINT_START
- TOKENS_PRECEDING_HINT
- COMMANDS
- COMMAND_PREFIX_TOKENS
- NUMERIC_LITERALS
- COMMENTS
- dialect
- tokenize
- sql
- size
- tokens
    class Parser(parser.Parser):
        """Parser overrides for Oracle-specific SQL grammar."""

        # KEEP (DENSE_RANK ... ) can be followed by an OVER clause, like window functions.
        WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
        VALUES_FOLLOWED_BY_PAREN = False

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "CONVERT": exp.ConvertToCharset.from_arg_list,
            "L2_DISTANCE": exp.EuclideanDistance.from_arg_list,
            # NVL is parsed as a two-argument COALESCE, flagged so it round-trips as NVL.
            "NVL": lambda args: build_coalesce(args, is_nvl=True),
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TO_CHAR": build_timetostr_or_tochar,
            "TO_TIMESTAMP": _build_to_timestamp,
            "TO_DATE": build_formatted_time(exp.StrToDate, "oracle"),
            "TRUNC": lambda args, dialect: build_trunc(
                args, dialect, date_trunc_unabbreviate=False, default_date_trunc_unit="DD"
            ),
        }
        # Oracle has no TO_BOOLEAN function.
        FUNCTIONS.pop("TO_BOOLEAN")

        NO_PAREN_FUNCTION_PARSERS = {
            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
            "NEXT": lambda self: self._parse_next_value_for(),
            "PRIOR": lambda self: self.expression(exp.Prior, this=self._parse_bitwise()),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
            "DBMS_RANDOM": lambda self: self._parse_dbms_random(),
        }

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.SYSTIMESTAMP: exp.Systimestamp,
        }

        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
            **parser.Parser.FUNCTION_PARSERS,
            "JSON_ARRAY": lambda self: self._parse_json_array(
                exp.JSONArray,
                expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())),
            ),
            "JSON_ARRAYAGG": lambda self: self._parse_json_array(
                exp.JSONArrayAgg,
                this=self._parse_format_json(self._parse_bitwise()),
                order=self._parse_order(),
            ),
            "JSON_EXISTS": lambda self: self._parse_json_exists(),
        }
        # CONVERT is parsed via FUNCTIONS (charset conversion), not the generic parser.
        FUNCTION_PARSERS.pop("CONVERT")

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            # GLOBAL/PRIVATE TEMPORARY tables.
            "GLOBAL": lambda self: self._match_text_seq("TEMPORARY")
            and self.expression(exp.TemporaryProperty, this="GLOBAL"),
            "PRIVATE": lambda self: self._match_text_seq("TEMPORARY")
            and self.expression(exp.TemporaryProperty, this="PRIVATE"),
            "FORCE": lambda self: self.expression(exp.ForceProperty),
        }

        QUERY_MODIFIER_PARSERS = {
            **parser.Parser.QUERY_MODIFIER_PARSERS,
            TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()),
            TokenType.WITH: lambda self: ("options", [self._parse_query_restrictions()]),
        }

        TYPE_LITERAL_PARSERS = {
            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
                exp.DateStrToDate, this=this
            ),
            # https://docs.oracle.com/en/database/oracle/oracle-database/19/refrn/NLS_TIMESTAMP_FORMAT.html
            exp.DataType.Type.TIMESTAMP: lambda self, this, _: _build_to_timestamp(
                [this, '"%Y-%m-%d %H:%M:%S.%f"']
            ),
        }

        # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..
        # Reference: https://stackoverflow.com/a/336455
        DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}

        QUERY_RESTRICTIONS: OPTIONS_TYPE = {
            "WITH": (
                ("READ", "ONLY"),
                ("CHECK", "OPTION"),
            ),
        }

        def _parse_dbms_random(self) -> t.Optional[exp.Expression]:
            """Parse DBMS_RANDOM.VALUE[(lower, upper)] into exp.Rand; None otherwise."""
            if self._match_text_seq(".", "VALUE"):
                lower, upper = None, None
                if self._match(TokenType.L_PAREN, advance=False):
                    lower_upper = self._parse_wrapped_csv(self._parse_bitwise)
                    # Only the two-argument form carries explicit bounds.
                    if len(lower_upper) == 2:
                        lower, upper = lower_upper

                return exp.Rand(lower=lower, upper=upper)

            # Not DBMS_RANDOM.VALUE: undo the token consumed by the dispatcher.
            self._retreat(self._index - 1)
            return None

        def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:
            """Parse the shared trailing clauses of JSON_ARRAY / JSON_ARRAYAGG."""
            return self.expression(
                expr_type,
                null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"),
                return_type=self._match_text_seq("RETURNING") and self._parse_type(),
                strict=self._match_text_seq("STRICT"),
                **kwargs,
            )

        def _parse_hint_function_call(self) -> t.Optional[exp.Expression]:
            """Parse a function-style optimizer hint, e.g. INDEX(t idx)."""
            # Require `name(` ahead; otherwise this is not a function-style hint.
            if not self._curr or not self._next or self._next.token_type != TokenType.L_PAREN:
                return None

            this = self._curr.text

            self._advance(2)  # skip the hint name and the opening paren
            args = self._parse_hint_args()
            this = self.expression(exp.Anonymous, this=this, expressions=args)
            self._match_r_paren(this)
            return this

        def _parse_hint_args(self):
            """Collect whitespace-separated hint arguments until no var parses."""
            args = []
            result = self._parse_var()

            while result:
                args.append(result)
                result = self._parse_var()

            return args

        def _parse_query_restrictions(self) -> t.Optional[exp.Expression]:
            """Parse WITH READ ONLY / WITH CHECK OPTION [CONSTRAINT name]."""
            kind = self._parse_var_from_options(self.QUERY_RESTRICTIONS, raise_unmatched=False)

            if not kind:
                return None

            return self.expression(
                exp.QueryOption,
                this=kind,
                expression=self._match(TokenType.CONSTRAINT) and self._parse_field(),
            )

        def _parse_json_exists(self) -> exp.JSONExists:
            """Parse JSON_EXISTS(doc, path [PASSING ...] [ON ERROR/EMPTY ...])."""
            this = self._parse_format_json(self._parse_bitwise())
            self._match(TokenType.COMMA)
            return self.expression(
                exp.JSONExists,
                this=this,
                path=self.dialect.to_json_path(self._parse_bitwise()),
                passing=self._match_text_seq("PASSING")
                and self._parse_csv(lambda: self._parse_alias(self._parse_bitwise())),
                on_condition=self._parse_on_condition(),
            )

        def _parse_into(self) -> t.Optional[exp.Into]:
            # https://docs.oracle.com/en/database/oracle/oracle-database/19/lnpls/SELECT-INTO-statement.html
            bulk_collect = self._match(TokenType.BULK_COLLECT_INTO)
            if not bulk_collect and not self._match(TokenType.INTO):
                return None

            index = self._index

            expressions = self._parse_expressions()
            if len(expressions) == 1:
                # Single target: reparse it as a table so schema/column info is kept.
                self._retreat(index)
                self._match(TokenType.TABLE)
                return self.expression(
                    exp.Into, this=self._parse_table(schema=True), bulk_collect=bulk_collect
                )

            return self.expression(exp.Into, bulk_collect=bulk_collect, expressions=expressions)

        def _parse_connect_with_prior(self):
            # CONNECT BY conditions may contain PRIOR-prefixed operands.
            return self._parse_assignment()

        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Extend column-op parsing with Oracle interval-span expressions."""
            this = super()._parse_column_ops(this)

            if not this:
                return this

            index = self._index

            # https://docs.oracle.com/en/database/oracle/oracle-database/26/sqlrf/Interval-Expressions.html
            interval_span = self._try_parse(lambda: self._parse_interval_span(this))
            if interval_span and isinstance(interval_span.args.get("unit"), exp.IntervalSpan):
                return interval_span

            # Not an interval span: roll the cursor back to before the attempt.
            self._retreat(index)
            return this

        def _parse_insert_table(self) -> t.Optional[exp.Expression]:
            # Oracle does not use AS for INSERT INTO alias
            # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/INSERT.html
            # Parse table parts without schema to avoid parsing the alias with its columns
            this = self._parse_table_parts(schema=True)

            if isinstance(this, exp.Table):
                alias_name = self._parse_id_var(any_token=False)
                if alias_name:
                    this.set("alias", exp.TableAlias(this=alias_name))

                this.set("partition", self._parse_partition())

                # Now parse the schema (column list) if present
                return self._parse_schema(this=this)

            return this
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
Inherited Members
- sqlglot.parser.Parser
- Parser
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- TRIGGER_EVENTS
- ALTERABLES
- ALIAS_TOKENS
- COLON_PLACEHOLDER_TOKENS
- ARRAY_CONSTRUCTORS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- ASSIGNMENT
- DISJUNCTION
- EQUALITY
- COMPARISON
- BITWISE
- TERM
- FACTOR
- EXPONENT
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- COLUMN_OPERATORS
- CAST_COLUMN_OPERATORS
- EXPRESSION_PARSERS
- STATEMENT_PARSERS
- UNARY_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- RANGE_PARSERS
- PIPE_SYNTAX_TRANSFORM_PARSERS
- CONSTRAINT_PARSERS
- ALTER_PARSERS
- ALTER_ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_TOKENS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_CONVERTERS
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- TRIGGER_TIMING
- TRIGGER_DEFERRABLE
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- SCHEMA_BINDING_OPTIONS
- PROCEDURE_OPTIONS
- EXECUTE_AS_OPTIONS
- KEY_CONSTRAINT_OPTIONS
- WINDOW_EXCLUDE_OPTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_PREFIX
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- COPY_INTO_VARLEN_OPTIONS
- IS_JSON_PREDICATE_KIND
- ODBC_DATETIME_LITERALS
- ON_CONDITION_TOKENS
- PRIVILEGE_FOLLOW_TOKENS
- DESCRIBE_STYLES
- SET_ASSIGNMENT_DELIMITERS
- ANALYZE_STYLES
- ANALYZE_EXPRESSION_PARSERS
- PARTITION_KEYWORDS
- AMBIGUOUS_ALIAS_TOKENS
- OPERATION_MODIFIERS
- RECURSIVE_CTE_SEARCH_KIND
- MODIFIABLES
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- IDENTIFY_PIVOT_STRINGS
- LOG_DEFAULTS_TO_LN
- TABLESAMPLE_CSV
- DEFAULT_SAMPLING_METHOD
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_SET_OP
- SET_OP_MODIFIERS
- NO_PAREN_IF_COMMANDS
- JSON_ARROWS_REQUIRE_JSON_TYPE
- COLON_IS_VARIANT_EXTRACT
- SUPPORTS_IMPLICIT_UNNEST
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
- OPTIONAL_ALIAS_TOKEN_CTE
- ALTER_RENAME_REQUIRES_COLUMN
- ALTER_TABLE_PARTITIONS
- JOINS_HAVE_EQUAL_PRECEDENCE
- ZONE_AWARE_TIMESTAMP_CONSTRUCTOR
- MAP_KEYS_ARE_ARBITRARY_EXPRESSIONS
- JSON_EXTRACT_REQUIRES_JSON_EXPRESSION
- ADD_JOIN_ON_TRUE
- SUPPORTS_OMITTED_INTERVAL_SPAN_UNIT
- raise_error
- validate_expression
- reset
- errors
- error_level
- error_message_context
- max_errors
- dialect
- sql
- parse
- parse_into
- check_errors
- expression
- parse_set_operation
- build_cast
    class Generator(generator.Generator):
        """Generator overrides that emit Oracle-flavored SQL."""

        LOCKING_READS_SUPPORTED = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        DATA_TYPE_SPECIFIERS_ALLOWED = True
        ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False
        # LIMIT is rendered as FETCH FIRST n ROWS.
        LIMIT_FETCH = "FETCH"
        TABLESAMPLE_KEYWORDS = "SAMPLE"
        LAST_DAY_SUPPORTS_DATE_PART = False
        SUPPORTS_SELECT_INTO = True
        TZ_TO_WITH_TIME_ZONE = True
        SUPPORTS_WINDOW_EXCLUDE = True
        # Oracle hints are space-separated inside a single /*+ ... */ comment.
        QUERY_HINT_SEP = " "
        SUPPORTS_DECODE_CASE = True

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TINYINT: "SMALLINT",
            exp.DataType.Type.SMALLINT: "SMALLINT",
            exp.DataType.Type.INT: "INT",
            exp.DataType.Type.BIGINT: "INT",
            exp.DataType.Type.DECIMAL: "NUMBER",
            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
            exp.DataType.Type.VARCHAR: "VARCHAR2",
            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
            exp.DataType.Type.NCHAR: "NCHAR",
            exp.DataType.Type.TEXT: "CLOB",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPNTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.BINARY: "BLOB",
            exp.DataType.Type.VARBINARY: "BLOB",
            exp.DataType.Type.ROWVERSION: "BLOB",
        }
        # Let BLOB fall through to its native spelling rather than a remapping.
        TYPE_MAPPING.pop(exp.DataType.Type.BLOB)

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.DateStrToDate: lambda self, e: self.func(
                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
            ),
            exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.unit),
            exp.EuclideanDistance: rename_func("L2_DISTANCE"),
            exp.ILike: no_ilike_sql,  # Oracle has no ILIKE; lowered to LIKE+LOWER
            exp.LogicalOr: rename_func("MAX"),
            exp.LogicalAnd: rename_func("MIN"),
            exp.Mod: rename_func("MOD"),
            exp.Rand: rename_func("DBMS_RANDOM.VALUE"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_qualify,
                ]
            ),
            exp.StrPosition: lambda self, e: (
                strposition_sql(
                    self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
                )
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
            exp.Substring: rename_func("SUBSTR"),
            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
            exp.TableSample: lambda self, e: self.tablesample_sql(e),
            exp.TemporaryProperty: lambda _, e: f"{e.name or 'GLOBAL'} TEMPORARY",
            exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToNumber: to_number_with_nls_param,
            exp.Trim: _trim_sql,
            exp.Unicode: lambda self, e: f"ASCII(UNISTR({self.sql(e.this)}))",
            # Epoch seconds -> DATE via day-fraction arithmetic (86400 s per day).
            exp.UnixToTime: lambda self,
            e: f"TO_DATE('1970-01-01', 'YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
            exp.UtcTimestamp: rename_func("UTC_TIMESTAMP"),
            exp.UtcTime: rename_func("UTC_TIME"),
            exp.Systimestamp: lambda self, e: "SYSTIMESTAMP",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str:
            """Render CURRENT_TIMESTAMP, or SYSDATE if that is what was parsed."""
            if expression.args.get("sysdate"):
                return "SYSDATE"

            this = expression.this
            return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP"

        def offset_sql(self, expression: exp.Offset) -> str:
            # Oracle requires the trailing ROWS keyword: OFFSET n ROWS.
            return f"{super().offset_sql(expression)} ROWS"

        def add_column_sql(self, expression: exp.Expression) -> str:
            return f"ADD {self.sql(expression)}"

        def queryoption_sql(self, expression: exp.QueryOption) -> str:
            """Render WITH READ ONLY / WITH CHECK OPTION [CONSTRAINT name]."""
            option = self.sql(expression, "this")
            value = self.sql(expression, "expression")
            value = f" CONSTRAINT {value}" if value else ""

            return f"{option}{value}"

        def coalesce_sql(self, expression: exp.Coalesce) -> str:
            # Preserve the user's spelling: NVL round-trips as NVL.
            func_name = "NVL" if expression.args.get("is_nvl") else "COALESCE"
            return rename_func(func_name)(self, expression)

        def into_sql(self, expression: exp.Into) -> str:
            """Render PL/SQL [BULK COLLECT] INTO targets."""
            into = "INTO" if not expression.args.get("bulk_collect") else "BULK COLLECT INTO"
            if expression.this:
                return f"{self.seg(into)} {self.sql(expression, 'this')}"

            return f"{self.seg(into)} {self.expressions(expression)}"

        def hint_sql(self, expression: exp.Hint) -> str:
            """Render optimizer hints as a single /*+ ... */ comment."""
            expressions = []

            # NOTE(review): the loop variable shadows the parameter; safe here
            # because the iterable is taken from the parameter before rebinding.
            for expression in expression.expressions:
                if isinstance(expression, exp.Anonymous):
                    # Function-style hints use space-separated args, not commas.
                    formatted_args = self.format_args(*expression.expressions, sep=" ")
                    expressions.append(f"{self.sql(expression, 'this')}({formatted_args})")
                else:
                    expressions.append(self.sql(expression))

            return f" /*+ {self.expressions(sqls=expressions, sep=self.QUERY_HINT_SEP).strip()} */"

        def isascii_sql(self, expression: exp.IsAscii) -> str:
            # Emulate IS ASCII with a regex over CHR(1)..CHR(127); NVL guards NULL input.
            return f"NVL(REGEXP_LIKE({self.sql(expression.this)}, '^[' || CHR(1) || '-' || CHR(127) || ']*$'), TRUE)"

        def interval_sql(self, expression: exp.Interval) -> str:
            # The INTERVAL keyword is only emitted for literal operands.
            return f"{'INTERVAL ' if isinstance(expression.this, exp.Literal) else ''}{self.sql(expression, 'this')} {self.sql(expression, 'unit')}"

        def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
            """Render a column definition, inlining an IN/OUT parameter mode if present."""
            param_constraint = expression.find(exp.InOutColumnConstraint)
            if param_constraint:
                # Emit the mode between name and type, then drop it from the tree
                # so the base generator does not render it a second time.
                sep = f" {self.sql(param_constraint)} "
                param_constraint.pop()
            return super().columndef_sql(expression, sep)
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True: Always quote except for special cases. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
436 def hint_sql(self, expression: exp.Hint) -> str: 437 expressions = [] 438 439 for expression in expression.expressions: 440 if isinstance(expression, exp.Anonymous): 441 formatted_args = self.format_args(*expression.expressions, sep=" ") 442 expressions.append(f"{self.sql(expression, 'this')}({formatted_args})") 443 else: 444 expressions.append(self.sql(expression)) 445 446 return f" /*+ {self.expressions(sqls=expressions, sep=self.QUERY_HINT_SEP).strip()} */"
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- SINGLE_STRING_INTERVAL
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_ONLY_LITERALS
- RENAME_TABLE_WITH_DB
- GROUPINGS_SEP
- INDEX_ON
- INOUT_SEPARATOR
- DIRECTED_JOINS
- QUERY_HINTS
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- EXTRACT_ALLOWS_QUOTES
- NVL2_SUPPORTED
- VALUES_AS_TABLE
- UNNEST_WITH_ORDINALITY
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_WITH_METHOD
- TABLESAMPLE_SEED_KEYWORD
- COLLATE_IS_FUNC
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- JSON_KEY_VALUE_PAIR_SEP
- INSERT_OVERWRITE
- SUPPORTS_UNLOGGED_TABLES
- SUPPORTS_CREATE_TABLE_LIKE
- LIKE_PROPERTY_INSIDE_SCHEMA
- MULTI_ARG_DISTINCT
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- SUPPORTED_JSON_PATH_PARTS
- CAN_IMPLEMENT_ARRAY_ANY
- SUPPORTS_TO_NUMBER
- SET_OP_MODIFIERS
- COPY_PARAMS_ARE_WRAPPED
- COPY_PARAMS_EQ_REQUIRED
- COPY_HAS_INTO_KEYWORD
- UNICODE_SUBSTITUTE
- STAR_EXCEPT
- HEX_FUNC
- WITH_PROPERTIES_PREFIX
- QUOTE_JSON_PATH
- PAD_FILL_PATTERN_IS_REQUIRED
- SUPPORTS_EXPLODING_PROJECTIONS
- ARRAY_CONCAT_IS_VAR_LEN
- SUPPORTS_CONVERT_TIMEZONE
- SUPPORTS_MEDIAN
- SUPPORTS_UNIX_SECONDS
- ALTER_SET_WRAPPED
- NORMALIZE_EXTRACT_DATE_PARTS
- PARSE_JSON_NAME
- ARRAY_SIZE_NAME
- ALTER_SET_TYPE
- ARRAY_SIZE_DIM_REQUIRED
- SUPPORTS_BETWEEN_FLAGS
- SUPPORTS_LIKE_QUANTIFIERS
- MATCH_AGAINST_TABLE_PREFIX
- SET_ASSIGNMENT_REQUIRES_VARIABLE_KEYWORD
- DECLARE_DEFAULT_ASSIGNMENT
- UPDATE_STATEMENT_SUPPORTS_FROM
- STAR_EXCLUDE_REQUIRES_DERIVED_TABLE
- UNSUPPORTED_TYPES
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- PARAMETER_TOKEN
- NAMED_PLACEHOLDER_TOKEN
- EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
- RESERVED_KEYWORDS
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS
- SAFE_JSON_PATH_KEY_RE
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- sanitize_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- pseudocolumn_sql
- columnposition_sql
- columnconstraint_sql
- computedcolumnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- inoutcolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- triggerproperties_sql
- triggerreferencing_sql
- triggerevent_sql
- clone_sql
- describe_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- datatype_sql
- directory_sql
- delete_sql
- drop_sql
- set_operation
- set_operations
- fetch_sql
- limitoptions_sql
- filter_sql
- indexparameters_sql
- index_sql
- identifier_sql
- hex_sql
- lowerhex_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- with_properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablefromrows_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- from_sql
- groupingsets_sql
- rollup_sql
- rollupindex_sql
- rollupproperty_sql
- cube_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- lateral_sql
- limit_sql
- setitem_sql
- set_sql
- queryband_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- booland_sql
- boolor_sql
- order_sql
- withfill_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- options_modifier
- for_modifiers
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- unnest_sql
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- formatphrase_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- strtotime_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- alterindex_sql
- alterdiststyle_sql
- altersortkey_sql
- alterrename_sql
- renamecolumn_sql
- alterset_sql
- alter_sql
- altersession_sql
- droppartition_sql
- addconstraint_sql
- addpartition_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- safedivide_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- is_sql
- like_sql
- ilike_sql
- match_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- sub_sql
- trycast_sql
- jsoncast_sql
- try_sql
- log_sql
- use_sql
- binary
- ceil_floor
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- whens_sql
- merge_sql
- tochar_sql
- tonumber_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- duplicatekeyproperty_sql
- uniquekeyproperty_sql
- distributedbyproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- generateembedding_sql
- mltranslate_sql
- mlforecast_sql
- featuresattime_sql
- vectorsearch_sql
- forin_sql
- refresh_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodatetime_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- struct_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- copyparameter_sql
- credentials_sql
- copy_sql
- semicolon_sql
- datadeletionproperty_sql
- maskingpolicycolumnconstraint_sql
- gapfill_sql
- scope_resolution
- scoperesolution_sql
- parsejson_sql
- rand_sql
- changes_sql
- pad_sql
- summarize_sql
- explodinggenerateseries_sql
- converttimezone_sql
- json_sql
- jsonvalue_sql
- conditionalinsert_sql
- multitableinserts_sql
- oncondition_sql
- jsonextractquote_sql
- jsonexists_sql
- arrayagg_sql
- slice_sql
- apply_sql
- grant_sql
- revoke_sql
- grantprivilege_sql
- grantprincipal_sql
- columns_sql
- overlay_sql
- todouble_sql
- string_sql
- median_sql
- overflowtruncatebehavior_sql
- unixseconds_sql
- arraysize_sql
- attach_sql
- detach_sql
- attachoption_sql
- watermarkcolumnconstraint_sql
- encodeproperty_sql
- includeproperty_sql
- xmlelement_sql
- xmlkeyvalueoption_sql
- partitionbyrangeproperty_sql
- partitionbyrangepropertydynamic_sql
- unpivotcolumns_sql
- analyzesample_sql
- analyzestatistics_sql
- analyzehistogram_sql
- analyzedelete_sql
- analyzelistchainedrows_sql
- analyzevalidate_sql
- analyze_sql
- xmltable_sql
- xmlnamespace_sql
- export_sql
- declare_sql
- declareitem_sql
- recursivewithsearch_sql
- parameterizedagg_sql
- anonymousaggfunc_sql
- combinedaggfunc_sql
- combinedparameterizedagg_sql
- show_sql
- install_sql
- get_put_sql
- translatecharacters_sql
- decodecase_sql
- semanticview_sql
- getextract_sql
- datefromunixdate_sql
- space_sql
- buildproperty_sql
- refreshtriggerproperty_sql
- modelattribute_sql
- directorystage_sql
- uuid_sql
- initcap_sql
- localtime_sql
- localtimestamp_sql
- weekstart_sql
- chr_sql
- block_sql
- storedprocedure_sql
- ifblock_sql
- whileblock_sql
- execute_sql
- executesql_sql