Edit on GitHub

sqlglot.dialects.solr

from sqlglot import tokens
from sqlglot.dialects.dialect import Dialect, NormalizationStrategy
from sqlglot.generators.solr import SolrGenerator
from sqlglot.parsers.solr import SolrParser


# https://solr.apache.org/guide/solr/latest/query-guide/sql-query.html


class Solr(Dialect):
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
    DPIPE_IS_STRING_CONCAT = False

    Generator = SolrGenerator

    Parser = SolrParser

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'"]
        IDENTIFIERS = ["`"]
class Solr(sqlglot.dialects.dialect.Dialect):

class Solr(Dialect):
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
    DPIPE_IS_STRING_CONCAT = False

    Generator = SolrGenerator

    Parser = SolrParser

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'"]
        IDENTIFIERS = ["`"]
NORMALIZATION_STRATEGY = <NormalizationStrategy.CASE_INSENSITIVE: 'CASE_INSENSITIVE'>

Specifies the strategy according to which identifiers should be normalized.

DPIPE_IS_STRING_CONCAT = False

Whether the DPIPE token (||) is a string concatenation operator.

Parser = <class 'sqlglot.parsers.solr.SolrParser'>
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = False

Whether string literals support escape sequences (e.g. \n). Set by the metaclass based on the tokenizer's STRING_ESCAPES.

BYTE_STRINGS_SUPPORT_ESCAPED_SEQUENCES: bool = False

Whether byte string literals support escape sequences. Set by the metaclass based on the tokenizer's BYTE_STRING_ESCAPES.

INITCAP_SUPPORTS_CUSTOM_DELIMITERS = False
tokenizer_class = <class 'Solr.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.dialects.dialect.JSONPathTokenizer'>
parser_class = <class 'sqlglot.parsers.solr.SolrParser'>
generator_class = <class 'sqlglot.generators.solr.SolrGenerator'>
TIME_TRIE: dict = {}
FORMAT_TRIE: dict = {}
INVERSE_TIME_MAPPING: dict[str, str] = {}
INVERSE_TIME_TRIE: dict = {}
INVERSE_FORMAT_MAPPING: dict[str, str] = {}
INVERSE_FORMAT_TRIE: dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: dict[str, str] = {}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
VALID_INTERVAL_UNITS: set[str] = {'DAYOFYEAR', 'H', 'MICROSEC', 'NSECONDS', 'S', 'MONTHS', 'DEC', 'EPOCH_SECOND', 'WEEKOFYEAR', 'CENTS', 'DW', 'QUARTERS', 'WEEKDAY_ISO', 'DECADE', 'NANOSEC', 'CENTURY', 'EPOCH', 'MSECS', 'M', 'DAYOFWEEK', 'WEEKDAY', 'CENTURIES', 'MILS', 'MI', 'DAYOFWEEKISO', 'MONTH', 'HOURS', 'DOW_ISO', 'WEEK_ISO', 'SEC', 'NSECOND', 'DAY', 'HH', 'MSEC', 'EPOCH_MICROSECONDS', 'CENT', 'DAYOFWEEK_ISO', 'YYY', 'YEAR', 'USECOND', 'C', 'EPOCH_SECONDS', 'YR', 'MIN', 'NSEC', 'WY', 'USECS', 'WEEKOFYEAR_ISO', 'DAY OF YEAR', 'YEARS', 'NANOSECOND', 'QTR', 'DOW', 'WEEKISO', 'EPOCH_MICROSECOND', 'DY', 'MILLISEC', 'MM', 'W', 'WEEK', 'SECONDS', 'NANOSECS', 'MILLISECON', 'SECOND', 'USECONDS', 'D', 'US', 'EPOCH_NANOSECONDS', 'Y', 'NS', 'EPOCH_NANOSECOND', 'YYYY', 'DW_ISO', 'DAYOFMONTH', 'QUARTER', 'MON', 'Q', 'MIL', 'MONS', 'WOY', 'WEEKOFYEARISO', 'MINUTE', 'YY', 'USEC', 'YRS', 'MILLISECONDS', 'MILLENNIUM', 'TIMEZONE_MINUTE', 'HR', 'HOUR', 'MICROSECONDS', 'EPOCH_MILLISECONDS', 'MILLISECOND', 'MILLISECS', 'MINS', 'MSECOND', 'WK', 'MSECONDS', 'DD', 'TIMEZONE_HOUR', 'TZM', 'MICROSECOND', 'MINUTES', 'DECS', 'QTRS', 'MS', 'TZH', 'MICROSECS', 'EPOCH_MILLISECOND', 'DECADES', 'HRS', 'DAY OF WEEK', 'SECS', 'DAYS', 'DOY', 'MILLENIA'}
BIT_START: str | None = None
BIT_END: str | None = None
HEX_START: str | None = None
HEX_END: str | None = None
BYTE_START: str | None = None
BYTE_END: str | None = None
UNICODE_START: str | None = None
UNICODE_END: str | None = None
class Solr.Tokenizer(sqlglot.tokens.Tokenizer):

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'"]
        IDENTIFIERS = ["`"]
QUOTES = ["'"]
IDENTIFIERS = ['`']
BYTE_STRING_ESCAPES: ClassVar[list[str]] = ["'"]