sqlglot.dialects.tsql
from __future__ import annotations

from sqlglot import tokens
from sqlglot.dialects.dialect import (
    Dialect,
    NormalizationStrategy,
)
from sqlglot.generators.tsql import TSQLGenerator
from sqlglot.parsers.tsql import TSQLParser
from sqlglot.tokens import TokenType
from sqlglot.typing.tsql import EXPRESSION_METADATA


class TSQL(Dialect):
    """Dialect definition for Microsoft SQL Server (Transact-SQL)."""

    # In T-SQL's LOG(expr, base) the base argument comes second.
    LOG_BASE_FIRST = False
    # a / b is integer division when both a and b are integers.
    TYPED_DIVISION = True
    # A NULL argument to CONCAT yields an empty string instead of NULL.
    CONCAT_COALESCE = True
    # A NULL argument to CONCAT_WS is skipped instead of propagating NULL.
    CONCAT_WS_COALESCE = True
    # Identifiers are normalized case-insensitively.
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
    # A single ALTER TABLE ... ADD may list several columns without
    # repeating the ADD keyword.
    ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False

    # Default time format literal for this dialect.
    TIME_FORMAT = "'yyyy-mm-dd hh:mm:ss'"

    # Copied so dialect-specific mutations don't leak into the shared table.
    EXPRESSION_METADATA = EXPRESSION_METADATA.copy()

    # T-SQL date-part abbreviations (DATEPART/DATEADD etc.) -> canonical
    # part names, extending the base dialect's mapping.
    DATE_PART_MAPPING = {
        **Dialect.DATE_PART_MAPPING,
        "QQ": "QUARTER",
        "M": "MONTH",
        "Y": "DAYOFYEAR",
        "WW": "WEEK",
        "N": "MINUTE",
        "SS": "SECOND",
        "MCS": "MICROSECOND",
        "TZOFFSET": "TIMEZONE_MINUTE",
        "TZ": "TIMEZONE_MINUTE",
        "ISO_WEEK": "WEEKISO",
        "ISOWK": "WEEKISO",
        "ISOWW": "WEEKISO",
    }

    # T-SQL time-format tokens -> equivalent Python strftime directives.
    # Note that case matters: lowercase "mm"/"m" follow the .NET convention
    # (minutes) while "MM"/"M" are months.
    TIME_MAPPING = {
        "year": "%Y",
        "dayofyear": "%j",
        "day": "%d",
        "dy": "%d",
        "y": "%Y",
        "week": "%W",
        "ww": "%W",
        "wk": "%W",
        "isowk": "%V",
        "isoww": "%V",
        "iso_week": "%V",
        # NOTE(review): "%h" is not a standard strftime directive ("%H"
        # would be 24-hour) - confirm this is intentional.
        "hour": "%h",
        "hh": "%I",
        "minute": "%M",
        "mi": "%M",
        "n": "%M",
        "second": "%S",
        "ss": "%S",
        "s": "%-S",
        "millisecond": "%f",
        "ms": "%f",
        "weekday": "%w",
        "dw": "%w",
        "month": "%m",
        "mm": "%M",
        "m": "%-M",
        "Y": "%Y",
        "YYYY": "%Y",
        "YY": "%y",
        "MMMM": "%B",
        "MMM": "%b",
        "MM": "%m",
        "M": "%-m",
        "dddd": "%A",
        "dd": "%d",
        "d": "%-d",
        "HH": "%H",
        "H": "%-H",
        "h": "%-I",
        "ffffff": "%f",
        "yyyy": "%Y",
        "yy": "%y",
    }

    # Numeric style codes accepted by CONVERT() -> strftime-style formats.
    # NOTE(review): styles "10"-"12" contain no "%" directives and style
    # "13" uses a literal "ss" where "%S" may be intended - confirm.
    CONVERT_FORMAT_MAPPING = {
        "0": "%b %d %Y %-I:%M%p",
        "1": "%m/%d/%y",
        "2": "%y.%m.%d",
        "3": "%d/%m/%y",
        "4": "%d.%m.%y",
        "5": "%d-%m-%y",
        "6": "%d %b %y",
        "7": "%b %d, %y",
        "8": "%H:%M:%S",
        "9": "%b %d %Y %-I:%M:%S:%f%p",
        "10": "mm-dd-yy",
        "11": "yy/mm/dd",
        "12": "yymmdd",
        "13": "%d %b %Y %H:%M:ss:%f",
        "14": "%H:%M:%S:%f",
        "20": "%Y-%m-%d %H:%M:%S",
        "21": "%Y-%m-%d %H:%M:%S.%f",
        "22": "%m/%d/%y %-I:%M:%S %p",
        "23": "%Y-%m-%d",
        "24": "%H:%M:%S",
        "25": "%Y-%m-%d %H:%M:%S.%f",
        "100": "%b %d %Y %-I:%M%p",
        "101": "%m/%d/%Y",
        "102": "%Y.%m.%d",
        "103": "%d/%m/%Y",
        "104": "%d.%m.%Y",
        "105": "%d-%m-%Y",
        "106": "%d %b %Y",
        "107": "%b %d, %Y",
        "108": "%H:%M:%S",
        "109": "%b %d %Y %-I:%M:%S:%f%p",
        "110": "%m-%d-%Y",
        "111": "%Y/%m/%d",
        "112": "%Y%m%d",
        "113": "%d %b %Y %H:%M:%S:%f",
        "114": "%H:%M:%S:%f",
        "120": "%Y-%m-%d %H:%M:%S",
        "121": "%Y-%m-%d %H:%M:%S.%f",
        "126": "%Y-%m-%dT%H:%M:%S.%f",
    }

    # .NET-style standard format specifiers (as used by FORMAT()) ->
    # strftime formats.
    # NOTE(review): "u"/"U" use "%M"/"%D" where month/day directives may be
    # intended - confirm against the upstream FORMAT() behavior.
    FORMAT_TIME_MAPPING = {
        "y": "%B %Y",
        "d": "%m/%d/%Y",
        "H": "%-H",
        "h": "%-I",
        "s": "%Y-%m-%d %H:%M:%S",
        "D": "%A,%B,%Y",
        "f": "%A,%B,%Y %-I:%M %p",
        "F": "%A,%B,%Y %-I:%M:%S %p",
        "g": "%m/%d/%Y %-I:%M %p",
        "G": "%m/%d/%Y %-I:%M:%S %p",
        "M": "%B %-d",
        "m": "%B %-d",
        "O": "%Y-%m-%dT%H:%M:%S",
        "u": "%Y-%M-%D %H:%M:%S%z",
        "U": "%A, %B %D, %Y %H:%M:%S%z",
        "T": "%-I:%M:%S %p",
        "t": "%-I:%M",
        "Y": "%a %Y",
    }

    class Tokenizer(tokens.Tokenizer):
        # Identifiers may be delimited with [brackets] or double quotes.
        IDENTIFIERS = [("[", "]"), '"']
        QUOTES = ["'", '"']
        # Hex literals are written 0xABCD with no closing delimiter.
        HEX_STRINGS = [("0x", ""), ("0X", "")]
        # Characters that may start/appear in variable and temp-object
        # names (@var, #temp, $identity).
        VAR_SINGLE_TOKENS = {"@", "$", "#"}

        # T-SQL keyword additions/overrides on top of the base tokenizer.
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "CLUSTERED INDEX": TokenType.INDEX,
            "DATETIME2": TokenType.DATETIME2,
            "DATETIMEOFFSET": TokenType.TIMESTAMPTZ,
            "DECLARE": TokenType.DECLARE,
            "EXEC": TokenType.EXECUTE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "GO": TokenType.COMMAND,
            "IMAGE": TokenType.IMAGE,
            "MONEY": TokenType.MONEY,
            "NONCLUSTERED INDEX": TokenType.INDEX,
            "NTEXT": TokenType.TEXT,
            "OPTION": TokenType.OPTION,
            "OUTPUT": TokenType.RETURNING,
            "PRINT": TokenType.COMMAND,
            "PROC": TokenType.PROCEDURE,
            "REAL": TokenType.FLOAT,
            "ROWVERSION": TokenType.ROWVERSION,
            "SMALLDATETIME": TokenType.SMALLDATETIME,
            "SMALLMONEY": TokenType.SMALLMONEY,
            "SQL_VARIANT": TokenType.VARIANT,
            "SYSTEM_USER": TokenType.CURRENT_USER,
            "TOP": TokenType.TOP,
            # T-SQL's TIMESTAMP type is a row-version stamp, not a datetime.
            "TIMESTAMP": TokenType.ROWVERSION,
            "TINYINT": TokenType.UTINYINT,
            "UNIQUEIDENTIFIER": TokenType.UUID,
            "UPDATE STATISTICS": TokenType.COMMAND,
            "XML": TokenType.XML,
        }
        # T-SQL does not use Oracle-style "/*+" hint comments.
        KEYWORDS.pop("/*+")

        # END is tokenized as a bare command; EXECUTE is removed so it gets
        # full parsing instead of command passthrough.
        COMMANDS = {*tokens.Tokenizer.COMMANDS, TokenType.END} - {TokenType.EXECUTE}

    # Parser/generator implementations live in their own modules.
    Parser = TSQLParser

    Generator = TSQLGenerator
class TSQL(Dialect):
    """Dialect definition for Microsoft SQL Server (Transact-SQL)."""

    # In T-SQL's LOG(expr, base) the base argument comes second.
    LOG_BASE_FIRST = False
    # a / b is integer division when both a and b are integers.
    TYPED_DIVISION = True
    # A NULL argument to CONCAT yields an empty string instead of NULL.
    CONCAT_COALESCE = True
    # A NULL argument to CONCAT_WS is skipped instead of propagating NULL.
    CONCAT_WS_COALESCE = True
    # Identifiers are normalized case-insensitively.
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
    # A single ALTER TABLE ... ADD may list several columns without
    # repeating the ADD keyword.
    ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False

    # Default time format literal for this dialect.
    TIME_FORMAT = "'yyyy-mm-dd hh:mm:ss'"

    # Copied so dialect-specific mutations don't leak into the shared table.
    EXPRESSION_METADATA = EXPRESSION_METADATA.copy()

    # T-SQL date-part abbreviations (DATEPART/DATEADD etc.) -> canonical
    # part names, extending the base dialect's mapping.
    DATE_PART_MAPPING = {
        **Dialect.DATE_PART_MAPPING,
        "QQ": "QUARTER",
        "M": "MONTH",
        "Y": "DAYOFYEAR",
        "WW": "WEEK",
        "N": "MINUTE",
        "SS": "SECOND",
        "MCS": "MICROSECOND",
        "TZOFFSET": "TIMEZONE_MINUTE",
        "TZ": "TIMEZONE_MINUTE",
        "ISO_WEEK": "WEEKISO",
        "ISOWK": "WEEKISO",
        "ISOWW": "WEEKISO",
    }

    # T-SQL time-format tokens -> equivalent Python strftime directives.
    # Note that case matters: lowercase "mm"/"m" follow the .NET convention
    # (minutes) while "MM"/"M" are months.
    TIME_MAPPING = {
        "year": "%Y",
        "dayofyear": "%j",
        "day": "%d",
        "dy": "%d",
        "y": "%Y",
        "week": "%W",
        "ww": "%W",
        "wk": "%W",
        "isowk": "%V",
        "isoww": "%V",
        "iso_week": "%V",
        # NOTE(review): "%h" is not a standard strftime directive ("%H"
        # would be 24-hour) - confirm this is intentional.
        "hour": "%h",
        "hh": "%I",
        "minute": "%M",
        "mi": "%M",
        "n": "%M",
        "second": "%S",
        "ss": "%S",
        "s": "%-S",
        "millisecond": "%f",
        "ms": "%f",
        "weekday": "%w",
        "dw": "%w",
        "month": "%m",
        "mm": "%M",
        "m": "%-M",
        "Y": "%Y",
        "YYYY": "%Y",
        "YY": "%y",
        "MMMM": "%B",
        "MMM": "%b",
        "MM": "%m",
        "M": "%-m",
        "dddd": "%A",
        "dd": "%d",
        "d": "%-d",
        "HH": "%H",
        "H": "%-H",
        "h": "%-I",
        "ffffff": "%f",
        "yyyy": "%Y",
        "yy": "%y",
    }

    # Numeric style codes accepted by CONVERT() -> strftime-style formats.
    # NOTE(review): styles "10"-"12" contain no "%" directives and style
    # "13" uses a literal "ss" where "%S" may be intended - confirm.
    CONVERT_FORMAT_MAPPING = {
        "0": "%b %d %Y %-I:%M%p",
        "1": "%m/%d/%y",
        "2": "%y.%m.%d",
        "3": "%d/%m/%y",
        "4": "%d.%m.%y",
        "5": "%d-%m-%y",
        "6": "%d %b %y",
        "7": "%b %d, %y",
        "8": "%H:%M:%S",
        "9": "%b %d %Y %-I:%M:%S:%f%p",
        "10": "mm-dd-yy",
        "11": "yy/mm/dd",
        "12": "yymmdd",
        "13": "%d %b %Y %H:%M:ss:%f",
        "14": "%H:%M:%S:%f",
        "20": "%Y-%m-%d %H:%M:%S",
        "21": "%Y-%m-%d %H:%M:%S.%f",
        "22": "%m/%d/%y %-I:%M:%S %p",
        "23": "%Y-%m-%d",
        "24": "%H:%M:%S",
        "25": "%Y-%m-%d %H:%M:%S.%f",
        "100": "%b %d %Y %-I:%M%p",
        "101": "%m/%d/%Y",
        "102": "%Y.%m.%d",
        "103": "%d/%m/%Y",
        "104": "%d.%m.%Y",
        "105": "%d-%m-%Y",
        "106": "%d %b %Y",
        "107": "%b %d, %Y",
        "108": "%H:%M:%S",
        "109": "%b %d %Y %-I:%M:%S:%f%p",
        "110": "%m-%d-%Y",
        "111": "%Y/%m/%d",
        "112": "%Y%m%d",
        "113": "%d %b %Y %H:%M:%S:%f",
        "114": "%H:%M:%S:%f",
        "120": "%Y-%m-%d %H:%M:%S",
        "121": "%Y-%m-%d %H:%M:%S.%f",
        "126": "%Y-%m-%dT%H:%M:%S.%f",
    }

    # .NET-style standard format specifiers (as used by FORMAT()) ->
    # strftime formats.
    # NOTE(review): "u"/"U" use "%M"/"%D" where month/day directives may be
    # intended - confirm against the upstream FORMAT() behavior.
    FORMAT_TIME_MAPPING = {
        "y": "%B %Y",
        "d": "%m/%d/%Y",
        "H": "%-H",
        "h": "%-I",
        "s": "%Y-%m-%d %H:%M:%S",
        "D": "%A,%B,%Y",
        "f": "%A,%B,%Y %-I:%M %p",
        "F": "%A,%B,%Y %-I:%M:%S %p",
        "g": "%m/%d/%Y %-I:%M %p",
        "G": "%m/%d/%Y %-I:%M:%S %p",
        "M": "%B %-d",
        "m": "%B %-d",
        "O": "%Y-%m-%dT%H:%M:%S",
        "u": "%Y-%M-%D %H:%M:%S%z",
        "U": "%A, %B %D, %Y %H:%M:%S%z",
        "T": "%-I:%M:%S %p",
        "t": "%-I:%M",
        "Y": "%a %Y",
    }

    class Tokenizer(tokens.Tokenizer):
        # Identifiers may be delimited with [brackets] or double quotes.
        IDENTIFIERS = [("[", "]"), '"']
        QUOTES = ["'", '"']
        # Hex literals are written 0xABCD with no closing delimiter.
        HEX_STRINGS = [("0x", ""), ("0X", "")]
        # Characters that may start/appear in variable and temp-object
        # names (@var, #temp, $identity).
        VAR_SINGLE_TOKENS = {"@", "$", "#"}

        # T-SQL keyword additions/overrides on top of the base tokenizer.
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "CLUSTERED INDEX": TokenType.INDEX,
            "DATETIME2": TokenType.DATETIME2,
            "DATETIMEOFFSET": TokenType.TIMESTAMPTZ,
            "DECLARE": TokenType.DECLARE,
            "EXEC": TokenType.EXECUTE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "GO": TokenType.COMMAND,
            "IMAGE": TokenType.IMAGE,
            "MONEY": TokenType.MONEY,
            "NONCLUSTERED INDEX": TokenType.INDEX,
            "NTEXT": TokenType.TEXT,
            "OPTION": TokenType.OPTION,
            "OUTPUT": TokenType.RETURNING,
            "PRINT": TokenType.COMMAND,
            "PROC": TokenType.PROCEDURE,
            "REAL": TokenType.FLOAT,
            "ROWVERSION": TokenType.ROWVERSION,
            "SMALLDATETIME": TokenType.SMALLDATETIME,
            "SMALLMONEY": TokenType.SMALLMONEY,
            "SQL_VARIANT": TokenType.VARIANT,
            "SYSTEM_USER": TokenType.CURRENT_USER,
            "TOP": TokenType.TOP,
            # T-SQL's TIMESTAMP type is a row-version stamp, not a datetime.
            "TIMESTAMP": TokenType.ROWVERSION,
            "TINYINT": TokenType.UTINYINT,
            "UNIQUEIDENTIFIER": TokenType.UUID,
            "UPDATE STATISTICS": TokenType.COMMAND,
            "XML": TokenType.XML,
        }
        # T-SQL does not use Oracle-style "/*+" hint comments.
        KEYWORDS.pop("/*+")

        # END is tokenized as a bare command; EXECUTE is removed so it gets
        # full parsing instead of command passthrough.
        COMMANDS = {*tokens.Tokenizer.COMMANDS, TokenType.END} - {TokenType.EXECUTE}

    # Parser/generator implementations live in their own modules.
    Parser = TSQLParser

    Generator = TSQLGenerator
Whether the base comes first in the LOG function.
Possible values: True, False, and None (None means the two-argument form of LOG is not supported).
Whether the behavior of a / b depends on the types of a and b.
False means a / b is always float division.
True means a / b is integer division if both a and b are integers.
A NULL arg in CONCAT yields NULL by default, but in some dialects it yields an empty string.
A NULL arg in CONCAT_WS yields NULL by default, but in some dialects it is skipped.
Specifies the strategy according to which identifiers should be normalized.
Associates this dialect's time formats with their equivalent Python strftime formats.
Whether string literals support escape sequences (e.g. \n). Set by the metaclass based on the tokenizer's STRING_ESCAPES.
Whether byte string literals support escape sequences. Set by the metaclass based on the tokenizer's BYTE_STRING_ESCAPES.
class Tokenizer(tokens.Tokenizer):
    """Tokenizer for the T-SQL dialect."""

    # Identifiers may be delimited with [brackets] or double quotes.
    IDENTIFIERS = [("[", "]"), '"']
    QUOTES = ["'", '"']
    # Hex literals are written 0xABCD with no closing delimiter.
    HEX_STRINGS = [("0x", ""), ("0X", "")]
    # Characters that may start/appear in variable and temp-object
    # names (@var, #temp, $identity).
    VAR_SINGLE_TOKENS = {"@", "$", "#"}

    # T-SQL keyword additions/overrides on top of the base tokenizer.
    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "CLUSTERED INDEX": TokenType.INDEX,
        "DATETIME2": TokenType.DATETIME2,
        "DATETIMEOFFSET": TokenType.TIMESTAMPTZ,
        "DECLARE": TokenType.DECLARE,
        "EXEC": TokenType.EXECUTE,
        "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
        "GO": TokenType.COMMAND,
        "IMAGE": TokenType.IMAGE,
        "MONEY": TokenType.MONEY,
        "NONCLUSTERED INDEX": TokenType.INDEX,
        "NTEXT": TokenType.TEXT,
        "OPTION": TokenType.OPTION,
        "OUTPUT": TokenType.RETURNING,
        "PRINT": TokenType.COMMAND,
        "PROC": TokenType.PROCEDURE,
        "REAL": TokenType.FLOAT,
        "ROWVERSION": TokenType.ROWVERSION,
        "SMALLDATETIME": TokenType.SMALLDATETIME,
        "SMALLMONEY": TokenType.SMALLMONEY,
        "SQL_VARIANT": TokenType.VARIANT,
        "SYSTEM_USER": TokenType.CURRENT_USER,
        "TOP": TokenType.TOP,
        # T-SQL's TIMESTAMP type is a row-version stamp, not a datetime.
        "TIMESTAMP": TokenType.ROWVERSION,
        "TINYINT": TokenType.UTINYINT,
        "UNIQUEIDENTIFIER": TokenType.UUID,
        "UPDATE STATISTICS": TokenType.COMMAND,
        "XML": TokenType.XML,
    }
    # T-SQL does not use Oracle-style "/*+" hint comments.
    KEYWORDS.pop("/*+")

    # END is tokenized as a bare command; EXECUTE is removed so it gets
    # full parsing instead of command passthrough.
    COMMANDS = {*tokens.Tokenizer.COMMANDS, TokenType.END} - {TokenType.EXECUTE}
Inherited Members
- sqlglot.tokens.Tokenizer
- Tokenizer
- SINGLE_TOKENS
- BIT_STRINGS
- BYTE_STRINGS
- RAW_STRINGS
- HEREDOC_STRINGS
- UNICODE_STRINGS
- STRING_ESCAPES
- ESCAPE_FOLLOW_CHARS
- IDENTIFIER_ESCAPES
- HEREDOC_TAG_IS_IDENTIFIER
- HEREDOC_STRING_ALTERNATIVE
- STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS
- NESTED_COMMENTS
- HINT_START
- TOKENS_PRECEDING_HINT
- COMMAND_PREFIX_TOKENS
- NUMERIC_LITERALS
- NUMBERS_CAN_HAVE_DECIMALS
- COMMENTS
- dialect
- tokenize
- sql
- size
- tokens