https://github.com/alibaba/druid/wiki/Druid_SQL_AST
https://github.com/alibaba/druid/wiki/SQL-Parser
https://www.cnblogs.com/lay2017/p/9840394.html
https://blog.csdn.net/prestigeding/article/details/72318482
https://blog.csdn.net/zhxdick/article/details/51350854
https://blog.csdn.net/zhouhao88410234/article/details/72356123
Lexical analysis: split the source text, line by line, into individual tokens according to predefined rules, for example numbers, variable names, function names and so on.
Syntax analysis: check whether the tokens produced by lexical analysis can be assembled into one of the predefined grammar constructs. A grammar is the set of rules defining those constructs (parsers mostly use context-free grammars).
Druid's SQL parsing is layered in three levels: statement parsing -> expression parsing -> lexical analysis. The corresponding main classes are listed below, followed by a minimal sketch that exercises all three layers:
Lexer: produces the individual tokens
SQLExprParser: parses tokens into expressions
SQLStatementParser: combines expressions and tokens into complete statements
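To make the three layers concrete, here is a minimal runnable sketch that touches each layer once. It relies only on the public MySqlLexer(String), MySqlExprParser(String) and MySqlStatementParser(String) constructors that are walked through later in this article; the output noted in the comments is indicative.

import com.alibaba.druid.sql.ast.SQLExpr;
import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlExprParser;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlLexer;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;

public class ThreeLayersDemo {
    public static void main(String[] args) {
        // Layer 1: the lexer turns characters into tokens.
        MySqlLexer lexer = new MySqlLexer("price * 0.9");
        lexer.nextToken();
        System.out.println(lexer.token()); // IDENTIFIER (the word "price")

        // Layer 2: the expression parser assembles tokens into an expression AST.
        SQLExpr expr = new MySqlExprParser("price * 0.9").expr();
        System.out.println(expr); // rendered back as SQL: price * 0.9

        // Layer 3: the statement parser assembles expressions and clauses into statements.
        SQLStatement stmt = new MySqlStatementParser("select price * 0.9 from t").parseStatementList().get(0);
        System.out.println(stmt);
    }
}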
public enum Token {
SELECT("SELECT"),
DELETE("DELETE"),
INSERT("INSERT"),
UPDATE("UPDATE"),
......
public final String name;
Token(){
this(null);
}
Token(String name){
this.name = name;
}
}
public class Keywords {
private final Map<String, Token> keywords;
private long[] hashArray;
private Token[] tokens;
public final static Keywords DEFAULT_KEYWORDS;
public final static Keywords SQLITE_KEYWORDS;
static {
Map<String, Token> map = new HashMap<String, Token>();
map.put("ALL", Token.ALL);
map.put("ALTER", Token.ALTER);
map.put("AND", Token.AND);
map.put("ANY", Token.ANY);
map.put("AS", Token.AS);
......
}
}
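As a quick illustration of how the keyword table is consulted, the sketch below uses only the getKeywords() accessor (it also appears later in the MySqlLexer static block); the real Lexer performs the equivalent lookup through the sorted FNV hash array instead of the map.

import com.alibaba.druid.sql.parser.Keywords;
import com.alibaba.druid.sql.parser.Token;

public class KeywordLookupDemo {
    public static void main(String[] args) {
        // Keys are stored upper-cased, so look words up in upper case.
        Token select = Keywords.DEFAULT_KEYWORDS.getKeywords().get("SELECT");
        Token userName = Keywords.DEFAULT_KEYWORDS.getKeywords().get("USER_NAME");
        System.out.println(select);   // SELECT
        System.out.println(userName); // null: an ordinary identifier, not a keyword
    }
}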
Lexer: lexical analysis
public class Lexer {
protected static SymbolTable symbols_l2 = new SymbolTable(512);
protected int features = 0;
public final String text; // the complete SQL text being lexed
protected int pos; // current position in the text
protected int mark; // start position of the token currently being scanned
protected char ch; // character at the current position
protected char[] buf; // buffer for the token currently being scanned
protected int bufPos; // length marker used to extract the token (text from mark to mark + bufPos)
protected Token token; // the token most recently recognized
protected Keywords keywods = Keywords.DEFAULT_KEYWORDS; // the keyword table
protected String stringVal; // string value of the current token
protected int commentCount = 0;
protected List<String> comments = null; // collected comments
protected boolean skipComment = true; // whether to skip comments
private SavePoint savePoint = null; // save point for backtracking
/*
* anti sql injection
*/
private boolean allowComment = true; // whether comments are allowed
private int varIndex = -1; // index used for '?' placeholders
protected CommentHandler commentHandler; // comment handler
protected boolean endOfComment = false; // whether the input ends with a comment
protected boolean keepComments = false; // whether to keep comments on the AST
protected int line = 0; // current line number
protected int lines = 0; // number of lines spanned by the current token
protected String dbType; // database type
protected boolean optimizedForParameterized = false;
private int startPos;
private int posLine;
private int posColumn;
private final static int[] digits = new int[(int) '9' + 1]; // lookup table for ASCII digits 0-9
.....
}
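The interplay between mark, pos and bufPos is easiest to see in a tiny standalone sketch. This is simplified illustration code, not Druid's actual scanIdentifier:

public class MiniScanIdentifier {
    public static void main(String[] args) {
        String text = "select  name from t";
        int pos = 8;             // suppose the lexer is currently standing on 'n'
        int mark = pos;          // remember where the current word starts
        int bufPos = 1;          // number of characters of the word scanned so far
        while (mark + bufPos < text.length()
                && Character.isLetterOrDigit(text.charAt(mark + bufPos))) {
            bufPos++;            // extend the word one character at a time
        }
        // the token text is the slice text[mark, mark + bufPos)
        String stringVal = text.substring(mark, mark + bufPos);
        pos = mark + bufPos;     // the lexer resumes scanning after the word
        System.out.println(stringVal + " / next pos = " + pos); // name / next pos = 12
    }
}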
SQLParser
public class SQLParser {
protected final Lexer lexer; // the lexer
protected String dbType; // database type
private int errorEndPos = -1; // position recorded when a parse error occurs
......
}
SQLStatementParser
public class SQLStatementParser extends SQLParser {
protected SQLExprParser exprParser;
protected boolean parseCompleteValues = true;
protected int parseValuesSize = 3;
protected SQLSelectListCache selectListCache = null;
protected InsertColumnsCache insertColumnsCache = null;
......
}
MySQL SQL parsing
Almost every SQL statement uses identifiers to refer to a database or to the objects it contains.
An unquoted identifier may consist of letters a-z in either case, digits 0-9, the dollar sign, the underscore, and extended Unicode characters in the range U+0080 to U+FFFF.
An unquoted identifier must not consist entirely of digits, since it would then be hard to distinguish from a number.
Identifiers can be quoted with backticks, which is useful for SQL reserved words or for names containing spaces or other special characters.
If the ANSI_QUOTES SQL mode is enabled, identifiers can also be quoted with double quotes.
(ANSI refers to the common SQL standard and QUOTES to the symbol used to quote objects; ANSI_QUOTES is the standard SQL quoting style. Databases differ in how they treat single and double quotes, AS aliases, object names and so on.)
Tokenization logic:
Whitespace: skip it, advance the position by 1 and continue the loop; when the whitespace character is a newline, also increment the line number.
scanIdentifier: scan an identifier
Inheritance hierarchy
- MySqlStatementParser->SQLStatementParser->SQLParser
- MySqlExprParser->SQLExprParser->SQLParser
- MySqlLexer->Lexer
Parsing code
MySqlStatementParser parser = new MySqlStatementParser("select * from t order by id");
List<SQLStatement> statementList = parser.parseStatementList();
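Expanded into a runnable sketch (getSelect() and getQuery() are the standard Druid AST accessors assumed here; toString() on an AST node renders it back to SQL):

import java.util.List;

import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock;
import com.alibaba.druid.sql.ast.statement.SQLSelectStatement;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;

public class ParseDemo {
    public static void main(String[] args) {
        MySqlStatementParser parser = new MySqlStatementParser("select * from t order by id");
        List<SQLStatement> statementList = parser.parseStatementList();

        // One statement was parsed; toString() prints it as formatted SQL.
        SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
        System.out.println(stmt);

        // The AST can be inspected further, e.g. the FROM clause of the query block.
        SQLSelectQueryBlock queryBlock = (SQLSelectQueryBlock) stmt.getSelect().getQuery();
        System.out.println(queryBlock.getFrom()); // t
    }
}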
Code walkthrough
MySqlStatementParser->SQLStatementParser->SQLParser
MySqlStatementParser initialization logic:
public MySqlStatementParser(String sql) {
// the constructor creates a MySqlExprParser
super(new MySqlExprParser(sql));
}
public MySqlExprParser(String sql){
// the constructor creates a MySqlLexer
this(new MySqlLexer(sql));
// read the first token
this.lexer.nextToken();
}
public MySqlLexer(String input){
// the MySQL keyword table is set up by the static initializer of MySqlLexer
this(input, true, true);
}
//parser.parseStatementList();
for (;;) {
switch (lexer.token) {
case SELECT: {
// token is SELECT: parse a SELECT statement
SQLStatement stmt = parseSelect();
stmt.setParent(parent);
statementList.add(stmt);
continue;
}
case INSERT: {
// parse an INSERT statement
SQLStatement stmt = parseInsert();
stmt.setParent(parent);
statementList.add(stmt);
continue;
}
......
}
parseSelect(): parsing a SELECT statement
SQLStatementParser extends SQLParser
public SQLStatement parseSelect() {
SQLSelectParser selectParser = createSQLSelectParser();
SQLSelect select = selectParser.select();
return new SQLSelectStatement(select,getDbType());
}
public SQLSelectParser createSQLSelectParser() {
return new SQLSelectParser(this.exprParser, selectListCache);
}
SQLSelectParser extends SQLParser
public SQLSelectParser(SQLExprParser exprParser, SQLSelectListCache selectListCache){
super(exprParser.getLexer(), exprParser.getDbType());
this.exprParser = exprParser;
this.selectListCache = selectListCache;
}
The new MySqlStatementParser(sql) constructor sets the expression parser to MySqlExprParser, and the MySqlExprParser constructor sets the lexer to MySqlLexer.
public class SQLParser {
protected final Lexer lexer;
protected String dbType;
public SQLParser(Lexer lexer, String dbType){
this.lexer = lexer;
this.dbType = dbType;
}
}
public class SQLStatementParser extends SQLParser {
protected SQLExprParser exprParser;
protected boolean parseCompleteValues = true;
protected int parseValuesSize = 3;
protected SQLSelectListCache selectListCache = null;
protected InsertColumnsCache insertColumnsCache = null;
public SQLStatementParser(SQLExprParser exprParser){
super(exprParser.getLexer(), exprParser.getDbType());
this.exprParser = exprParser;
}
}
public class MySqlStatementParser extends SQLStatementParser {
public MySqlStatementParser(String sql) {
// call the parent constructor, setting MySqlExprParser as the expression parser
super(new MySqlExprParser(sql));
}
}
public class SQLExprParser extends SQLParser {
public final static String[] AGGREGATE_FUNCTIONS;
public final static long[] AGGREGATE_FUNCTIONS_CODES;
static {
String[] strings = { "AVG", "COUNT", "MAX", "MIN", "STDDEV", "SUM" };
AGGREGATE_FUNCTIONS_CODES = FnvHash.fnv1a_64_lower(strings, true);
AGGREGATE_FUNCTIONS = new String[AGGREGATE_FUNCTIONS_CODES.length];
for (String str : strings) {
long hash = FnvHash.fnv1a_64_lower(str);
int index = Arrays.binarySearch(AGGREGATE_FUNCTIONS_CODES, hash);
AGGREGATE_FUNCTIONS[index] = str;
}
}
protected String[] aggregateFunctions = AGGREGATE_FUNCTIONS;
protected long[] aggregateFunctionHashCodes = AGGREGATE_FUNCTIONS_CODES;
}
public class MySqlExprParser extends SQLExprParser {
public final static String[] AGGREGATE_FUNCTIONS;
public final static long[] AGGREGATE_FUNCTIONS_CODES;
static {
String[] strings = {"AVG","COUNT","GROUP_CONCAT","MAX", "MIN","STDDEV","SUM" };
AGGREGATE_FUNCTIONS_CODES = FnvHash.fnv1a_64_lower(strings, true);
AGGREGATE_FUNCTIONS = new String[AGGREGATE_FUNCTIONS_CODES.length];
for (String str : strings) {
long hash = FnvHash.fnv1a_64_lower(str);
int index = Arrays.binarySearch(AGGREGATE_FUNCTIONS_CODES, hash);
AGGREGATE_FUNCTIONS[index] = str;
}
}
public MySqlExprParser(Lexer lexer){
super(lexer, JdbcConstants.MYSQL);
this.aggregateFunctions = AGGREGATE_FUNCTIONS;
this.aggregateFunctionHashCodes = AGGREGATE_FUNCTIONS_CODES;
}
public MySqlExprParser(String sql){
this(new MySqlLexer(sql));
this.lexer.nextToken();
}
}
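The point of these static blocks is to avoid string comparison at parse time: the function names are hashed with a lower-casing FNV-1a hash, the hashes are sorted, and each lookup becomes a binary search. The following is an illustrative, self-contained re-implementation of that trick, not Druid's FnvHash class:

import java.util.Arrays;

public class FnvLookupDemo {
    // FNV-1a 64-bit hash over the lower-cased characters of s.
    static long fnv1a64Lower(String s) {
        long hash = 0xcbf29ce484222325L;   // FNV offset basis
        for (int i = 0; i < s.length(); i++) {
            char c = s.charAt(i);
            if (c >= 'A' && c <= 'Z') {
                c += 32;                   // lower-case on the fly
            }
            hash ^= c;
            hash *= 0x100000001b3L;        // FNV prime
        }
        return hash;
    }

    public static void main(String[] args) {
        String[] names = { "AVG", "COUNT", "MAX", "MIN", "STDDEV", "SUM" };
        long[] codes = new long[names.length];
        for (int i = 0; i < names.length; i++) {
            codes[i] = fnv1a64Lower(names[i]);
        }
        Arrays.sort(codes);                // binarySearch requires a sorted array

        // Case-insensitive membership test without comparing strings.
        System.out.println(Arrays.binarySearch(codes, fnv1a64Lower("count")) >= 0); // true
        System.out.println(Arrays.binarySearch(codes, fnv1a64Lower("price")) >= 0); // false
    }
}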
The MySqlStatementParser constructor creates a MySqlExprParser, and the MySqlExprParser constructor creates a MySqlLexer.
Once the lexer is initialized, control returns to the MySqlExprParser constructor, which reads the first token. The keyword set itself is initialized in MySqlLexer's static blocks:
public class MySqlLexer extends Lexer {
public static SymbolTable quoteTable = new SymbolTable(8192);
public final static Keywords DEFAULT_MYSQL_KEYWORDS;//Map<String, Token> keywords
private final static boolean[] identifierFlags = new boolean[256];// valid identifier characters
// build the MySQL keyword table
static {
Map<String, Token> map = new HashMap<String, Token>();
// generic SQL keywords
map.putAll(Keywords.DEFAULT_KEYWORDS.getKeywords());
map.put("DUAL", Token.DUAL); // MySQL-specific keywords
......
DEFAULT_MYSQL_KEYWORDS = new Keywords(map);
}
// valid identifier characters ([0-9a-zA-Z_]): letters, digits, underscore
static {
for (char c = 0; c < identifierFlags.length; ++c) {
if (c >= 'A' && c <= 'Z') {
identifierFlags[c] = true;
} else if (c >= 'a' && c <= 'z') {
identifierFlags[c] = true;
} else if (c >= '0' && c <= '9') {
identifierFlags[c] = true;
}
}
// identifierFlags['`'] = true;
identifierFlags['_'] = true;
//identifierFlags['-'] = true; // mysql
}
}
CharTypes
public class CharTypes {
private final static boolean[] hexFlags = new boolean[256]; // is the character a hex digit
// characters that may start an identifier: a-z, A-Z, backtick, underscore, dollar sign
private final static boolean[] firstIdentifierFlags = new boolean[256];
// identifier characters: a-z, A-Z, 0-9, _, $, #
private final static boolean[] identifierFlags = new boolean[256];
// cache of single-character strings for identifiers
private final static String[] stringCache = new String[256];
//https://blog.csdn.net/yueyueniaolzp/article/details/82178954
// whitespace or control characters (ASCII 0-32)
private final static boolean[] whitespaceFlags = new boolean[256];
}
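All of these arrays follow the same pattern: classify every ASCII code once in a static block, then answer isXxxChar questions with a single array read inside the scanning loop. A minimal standalone sketch of the idea (illustrative only, not the real CharTypes):

public class CharFlagsDemo {
    private static final boolean[] FIRST_IDENTIFIER_FLAGS = new boolean[256];

    static {
        for (char c = 0; c < FIRST_IDENTIFIER_FLAGS.length; ++c) {
            if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) {
                FIRST_IDENTIFIER_FLAGS[c] = true;
            }
        }
        FIRST_IDENTIFIER_FLAGS['_'] = true;
        FIRST_IDENTIFIER_FLAGS['$'] = true;
        FIRST_IDENTIFIER_FLAGS['`'] = true;
    }

    static boolean isFirstIdentifierChar(char c) {
        // Characters beyond the table (e.g. CJK characters) are accepted as identifier starts.
        return c >= FIRST_IDENTIFIER_FLAGS.length || FIRST_IDENTIFIER_FLAGS[c];
    }

    public static void main(String[] args) {
        System.out.println(isFirstIdentifierChar('a')); // true
        System.out.println(isFirstIdentifierChar('1')); // false: digits cannot start an identifier
        System.out.println(isFirstIdentifierChar('表')); // true: non-ASCII identifier start
    }
}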
Lexer: fetching the next token
public final void nextToken() {
startPos = pos;
bufPos = 0;
if (comments != null && comments.size() > 0) {
comments = null;
}
this.lines = 0;
int startLine = line;
for (;;) {
if (isWhitespace(ch)) { // whitespace or control character
if (ch == '\n') { // newline
line++; // advance the current line number
lines = line - startLine;
}
ch = charAt(++pos); // move on to the next character
continue;
}
if (ch == '$' && charAt(pos + 1) == '{') { // variable placeholder, e.g. ${name}
scanVariable(); // stringVal holds the variable text, token becomes Token.VARIANT
return;
}
if (isFirstIdentifierChar(ch)) { // can this character start an identifier?
if (ch == '(') {
scanChar(); // advance one position
token = LPAREN; // token is the left parenthesis LPAREN
return;
} else if (ch == ')') {
scanChar(); // advance one position
token = RPAREN; // token is the right parenthesis RPAREN
return;
}
if (ch == 'N' || ch == 'n') {
if (charAt(pos + 1) == '\'') {
++pos;
ch = '\'';
scanString();
token = Token.LITERAL_NCHARS;
return;
}
}
scanIdentifier(); // scan an identifier or keyword
return;
}
switch (ch) {
case '0':
if (charAt(pos + 1) == 'x') {
scanChar();
scanChar();
scanHexaDecimal(); // scan a hexadecimal literal
} else {
scanNumber(); // scan a numeric literal
}
return;
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
scanNumber(); // scan a numeric literal
return;
case ',':
case '，': // full-width comma
scanChar();
token = COMMA;
return;
case '(':
case '（': // full-width left parenthesis
scanChar();
token = LPAREN;
return;
case ')':
case '）': // full-width right parenthesis
scanChar();
token = RPAREN;
return;
case '[':
scanLBracket();
return;
case ']':
scanChar();
token = RBRACKET;
return;
case '{':
scanChar();
token = LBRACE;
return;
case '}':
scanChar();
token = RBRACE;
return;
case ':':
scanChar();
if (ch == '=') {
scanChar();
token = COLONEQ;
} else if (ch == ':') {
scanChar();
token = COLONCOLON;
} else {
unscan();
scanVariable();
}
return;
case '#':
scanSharp();
if ((token == Token.LINE_COMMENT || token == Token.MULTI_LINE_COMMENT) && skipComment) {
bufPos = 0;
continue;
}
return;
case '.':
scanChar();
if (isDigit(ch) && !isFirstIdentifierChar(charAt(pos - 2))) {
unscan();
scanNumber();
return;
} else if (ch == '.') {
scanChar();
if (ch == '.') {
scanChar();
token = Token.DOTDOTDOT;
} else {
token = Token.DOTDOT;
}
} else {
token = Token.DOT;
}
return;
case '\'':
scanString();
return;
case '\"':
scanAlias();
return;
case '*':
scanChar();
token = Token.STAR;
return;
case '?':
scanChar();
if (ch == '?' && JdbcConstants.POSTGRESQL.equals(dbType)) {
scanChar();
if (ch == '|') {
scanChar();
token = Token.QUESBAR;
} else {
token = Token.QUESQUES;
}
} else if (ch == '|' && JdbcConstants.POSTGRESQL.equals(dbType)) {
scanChar();
if (ch == '|') {
unscan();
token = Token.QUES;
} else {
token = Token.QUESBAR;
}
} else if (ch == '&' && JdbcConstants.POSTGRESQL.equals(dbType)) {
scanChar();
token = Token.QUESAMP;
} else {
token = Token.QUES;
}
return;
case ';':
scanChar();
token = Token.SEMI;
return;
case '`':
throw new ParserException("TODO. " + info()); // TODO
case '@':
scanVariable_at();
return;
case '-':
if (charAt(pos +1) == '-') {
scanComment();
if ((token == Token.LINE_COMMENT || token == Token.MULTI_LINE_COMMENT) && skipComment) {
bufPos = 0;
continue;
}
} else {
scanOperator();
}
return;
case '/':
int nextChar = charAt(pos + 1);
if (nextChar == '/' || nextChar == '*') {
scanComment();
if ((token == Token.LINE_COMMENT || token == Token.MULTI_LINE_COMMENT) && skipComment) {
bufPos = 0;
continue;
}
} else {
token = Token.SLASH;
scanChar();
}
return;
default:
if (Character.isLetter(ch)) {
scanIdentifier();
return;
}
if (isOperator(ch)) {
scanOperator();
return;
}
if (ch == '\\' && charAt(pos + 1) == 'N'
&& JdbcConstants.MYSQL.equals(dbType)) {
scanChar();
scanChar();
token = Token.NULL;
return;
}
// QS_TODO ?
if (isEOF()) { // JLS
token = EOF;
} else {
lexError("illegal.char", String.valueOf((int) ch));
scanChar();
}
return;
}
}
}
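Driving nextToken() in a loop shows the token stream it produces. A small sketch, assuming the public MySqlLexer(String) constructor and the token()/stringVal() accessors; line comments in the input would simply be skipped because skipComment defaults to true:

import com.alibaba.druid.sql.dialect.mysql.parser.MySqlLexer;
import com.alibaba.druid.sql.parser.Lexer;
import com.alibaba.druid.sql.parser.Token;

public class NextTokenDemo {
    public static void main(String[] args) {
        Lexer lexer = new MySqlLexer("select id, user_name from t where id = ?");
        for (;;) {
            lexer.nextToken();
            Token token = lexer.token();
            if (token == Token.EOF) {
                break;
            }
            if (token == Token.IDENTIFIER) {
                System.out.println(token + "(" + lexer.stringVal() + ")");
            } else {
                System.out.println(token);
            }
        }
        // Expected sequence, roughly:
        // SELECT, IDENTIFIER(id), COMMA, IDENTIFIER(user_name), FROM, IDENTIFIER(t),
        // WHERE, IDENTIFIER(id), EQ, QUES
    }
}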
The dbType determines which SQLStatementParser is created. The supported SQLStatementParser implementations are:
- OracleStatementParser
- MySqlStatementParser
- PGSQLStatementParser
- SQLServerStatementParser
- H2StatementParser
- DB2StatementParser
- OdpsStatementParser
- PhoenixStatementParser
- HiveStatementParser
- SQLStatementParser
When no database type matches, the generic SQLStatementParser is used by default.
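In practice this dispatch is wrapped by SQLParserUtils.createSQLStatementParser; a short sketch of how it is typically called (JdbcConstants.MYSQL is simply the "mysql" dbType constant):

import com.alibaba.druid.sql.parser.SQLParserUtils;
import com.alibaba.druid.sql.parser.SQLStatementParser;
import com.alibaba.druid.util.JdbcConstants;

public class DispatchDemo {
    public static void main(String[] args) {
        // The factory picks the dialect-specific parser by dbType and falls back
        // to the generic SQLStatementParser for unknown types.
        SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(
                "select * from t", JdbcConstants.MYSQL);
        System.out.println(parser.getClass().getSimpleName()); // MySqlStatementParser
    }
}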
com.alibaba.druid.sql.parser.SQLExprParser
public SQLExpr expr() {
if (lexer.token == Token.STAR) {
lexer.nextToken();
SQLExpr expr = new SQLAllColumnExpr();
if (lexer.token == Token.DOT) {
lexer.nextToken();
accept(Token.STAR);
return new SQLPropertyExpr(expr, "*");
}
return expr;
}
SQLExpr expr = primary();
Token token = lexer.token;
if (token == Token.COMMA) {
return expr;
} else if (token == Token.EQ) {
expr = relationalRest(expr);
expr = andRest(expr);
expr = xorRest(expr);
expr = orRest(expr);
return expr;
} else {
return exprRest(expr);
}
}
public SQLExpr primary() {
.....
}
public SQLExpr exprRest(SQLExpr expr) {
expr = bitXorRest(expr);
expr = multiplicativeRest(expr);
expr = additiveRest(expr);
expr = shiftRest(expr);
expr = bitAndRest(expr);
expr = bitOrRest(expr);
expr = inRest(expr);
expr = relationalRest(expr);
//expr = equalityRest(expr);
expr = andRest(expr);
expr = xorRest(expr);
expr = orRest(expr);
return expr;
}
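The call order inside exprRest is what encodes operator precedence: bitXor binds tighter than multiplicative, which binds tighter than additive, and so on down to or. A quick sketch that checks the resulting tree shape, using MySqlExprParser(String) and expr() as shown above and the standard SQLBinaryOpExpr accessors:

import com.alibaba.druid.sql.ast.SQLExpr;
import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlExprParser;

public class PrecedenceDemo {
    public static void main(String[] args) {
        // Because multiplicativeRest runs before additiveRest, "b * c" ends up
        // grouped as the right operand of the '+' node.
        SQLExpr expr = new MySqlExprParser("a + b * c").expr();
        SQLBinaryOpExpr add = (SQLBinaryOpExpr) expr;
        System.out.println(add.getOperator()); // the '+' operator (Add)
        System.out.println(add.getLeft());     // a
        System.out.println(add.getRight());    // b * c
    }
}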
public final SQLExpr bitXor() {
SQLExpr expr = primary();
return bitXorRest(expr);
}
public SQLExpr bitXorRest(SQLExpr expr) {
.....
}
public final SQLExpr multiplicative() {
SQLExpr expr = bitXor();
return multiplicativeRest(expr);
}
public SQLExpr multiplicativeRest(SQLExpr expr) {
......
}
public final SQLExpr additive() {
SQLExpr expr = multiplicative();
if (lexer.token == Token.PLUS
|| lexer.token == Token.BARBAR
|| lexer.token == Token.CONCAT
|| lexer.token == Token.SUB) {
expr = additiveRest(expr);
}
return expr;
}
public SQLExpr primaryRest(SQLExpr expr) {
......
}
protected SQLExpr dotRest(SQLExpr expr) {
......
}
private SQLExpr methodRest(SQLExpr expr, String name, boolean aggregate) {
......
}
public SQLName name() {
......
}
public SQLName nameRest(SQLName name) {
......
}
public final SQLExpr bitAnd() {
SQLExpr expr = shift();
return bitAndRest(expr);
}
public final SQLExpr bitAndRest(SQLExpr expr) {
while (lexer.token == Token.AMP) {
lexer.nextToken();
SQLExpr rightExp = shift();
expr = new SQLBinaryOpExpr(expr, SQLBinaryOperator.BitwiseAnd, rightExp, getDbType());
}
return expr;
}
public final SQLExpr bitOr() {
SQLExpr expr = bitAnd();
return bitOrRest(expr);
}
public final SQLExpr bitOrRest(SQLExpr expr) {
while (lexer.token == Token.BAR) {
lexer.nextToken();
SQLBinaryOperator op = SQLBinaryOperator.BitwiseOr;
if (lexer.token == Token.BAR) {
lexer.nextToken();
op = SQLBinaryOperator.Concat;
}
SQLExpr rightExp = bitAnd();
expr = new SQLBinaryOpExpr(expr, op, rightExp, getDbType());
expr = bitAndRest(expr);
}
return expr;
}
public final SQLExpr inRest(SQLExpr expr) {
......
}
public SQLExpr additiveRest(SQLExpr expr) {
Token token = lexer.token;
if (token == Token.PLUS) {
lexer.nextToken();
SQLExpr rightExp = multiplicative();
expr = new SQLBinaryOpExpr(expr, SQLBinaryOperator.Add, rightExp, dbType);
expr = additiveRest(expr);
} else if ((token == Token.BARBAR || token == Token.CONCAT)
&& (isEnabled(SQLParserFeature.PipesAsConcat) || !JdbcConstants.MYSQL.equals(dbType))) {
lexer.nextToken();
SQLExpr rightExp = multiplicative();
expr = new SQLBinaryOpExpr(expr, SQLBinaryOperator.Concat, rightExp, dbType);
expr = additiveRest(expr);
} else if (token == Token.SUB) {
lexer.nextToken();
SQLExpr rightExp = multiplicative();
expr = new SQLBinaryOpExpr(expr, SQLBinaryOperator.Subtract, rightExp, dbType);
expr = additiveRest(expr);
}
return expr;
}
public final SQLExpr shift() {
SQLExpr expr = additive();
return shiftRest(expr);
}
public SQLExpr shiftRest(SQLExpr expr) {
if (lexer.token == Token.LTLT) {
lexer.nextToken();
SQLExpr rightExp = additive();
expr = new SQLBinaryOpExpr(expr, SQLBinaryOperator.LeftShift, rightExp, dbType);
expr = shiftRest(expr);
} else if (lexer.token == Token.GTGT) {
lexer.nextToken();
SQLExpr rightExp = additive();
expr = new SQLBinaryOpExpr(expr, SQLBinaryOperator.RightShift, rightExp, dbType);
expr = shiftRest(expr);
}
return expr;
}
public SQLExpr and() {
SQLExpr expr = relational();
if (lexer.token == Token.AND || lexer.token == Token.AMPAMP) {
expr = andRest(expr);
}
return expr;
}
public SQLExpr andRest(SQLExpr expr) {
......
}
public SQLExpr xor() {
SQLExpr expr = and();
if (lexer.token == Token.XOR) {
expr = xorRest(expr);
}
return expr;
}
public SQLExpr xorRest(SQLExpr expr) {
for (;;) {
if (lexer.token == Token.XOR) {
lexer.nextToken();
SQLExpr rightExp = and();
expr = new SQLBinaryOpExpr(expr, SQLBinaryOperator.BooleanXor, rightExp, dbType);
} else {
break;
}
}
return expr;
}
public SQLExpr or() {
SQLExpr expr = xor();
if (lexer.token == Token.OR || lexer.token == Token.BARBAR) {
expr = orRest(expr);
}
return expr;
}
public SQLExpr orRest(SQLExpr expr) {
......
}
public SQLExpr relational() {
SQLExpr expr = bitOr();
return relationalRest(expr);
}
public SQLExpr relationalRest(SQLExpr expr) {
......
}
public SQLExpr notRationalRest(SQLExpr expr) {
switch (lexer.token) {
case LIKE:
......
break;
case IN:
.....
break;
case CONTAINS:
......
break;
case BETWEEN:
......
break;
case RLIKE:
......
break;
case IDENTIFIER:
long hash = lexer.hash_lower;
if (hash == FnvHash.Constants.REGEXP) {
lexer.nextToken();
rightExp = bitOr();
expr = new SQLBinaryOpExpr(expr, SQLBinaryOperator.NotRegExp, rightExp, getDbType());
}
break;
default:
throw new ParserException("TODO " + lexer.info());
}
return expr;
}
}
com.alibaba.druid.sql.dialect.mysql.parser.MySqlExprParser
public SQLExpr primary() {
final Token tok = lexer.token();
if (lexer.identifierEquals(FnvHash.Constants.OUTFILE)) {
lexer.nextToken();
SQLExpr file = primary();
SQLExpr expr = new MySqlOutFileExpr(file);
return primaryRest(expr);
}
switch (tok) {
case VARIANT:
SQLVariantRefExpr varRefExpr = new SQLVariantRefExpr(lexer.stringVal());
lexer.nextToken();
if (varRefExpr.getName().equalsIgnoreCase("@@global")) {
accept(Token.DOT);
varRefExpr = new SQLVariantRefExpr(lexer.stringVal(), true);
lexer.nextToken();
} else if (varRefExpr.getName().equals("@") && lexer.token() == Token.LITERAL_CHARS) {
varRefExpr.setName("@'" + lexer.stringVal() + "'");
lexer.nextToken();
} else if (varRefExpr.getName().equals("@@") && lexer.token() == Token.LITERAL_CHARS) {
varRefExpr.setName("@@'" + lexer.stringVal() + "'");
lexer.nextToken();
}
return primaryRest(varRefExpr);
case VALUES:
lexer.nextToken();
if (lexer.token() != Token.LPAREN) {
throw new ParserException("syntax error, illegal values clause. " + lexer.info());
}
return this.methodRest(new SQLIdentifierExpr("VALUES"), true);
case BINARY:
lexer.nextToken();
if (lexer.token() == Token.COMMA || lexer.token() == Token.SEMI || lexer.token() == Token.EOF) {
return new SQLIdentifierExpr("BINARY");
} else {
SQLUnaryExpr binaryExpr = new SQLUnaryExpr(SQLUnaryOperator.BINARY, expr());
return primaryRest(binaryExpr);
}
default:
return super.primary();
}
}
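For example, a MySQL system variable goes through the VARIANT branch above and comes back as a SQLVariantRefExpr. A small sketch, with accessor names assumed from the Druid AST API:

import com.alibaba.druid.sql.ast.SQLExpr;
import com.alibaba.druid.sql.ast.expr.SQLVariantRefExpr;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlExprParser;

public class VariantDemo {
    public static void main(String[] args) {
        // "@@max_connections" is lexed as a single VARIANT token.
        SQLExpr expr = new MySqlExprParser("@@max_connections").expr();
        System.out.println(expr.getClass().getSimpleName());      // SQLVariantRefExpr
        System.out.println(((SQLVariantRefExpr) expr).getName()); // @@max_connections
    }
}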
public final SQLExpr primaryRest(SQLExpr expr) {
if (expr == null) {
throw new IllegalArgumentException("expr");
}
if (lexer.token() == Token.LITERAL_CHARS) {
if (expr instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identExpr = (SQLIdentifierExpr) expr;
String ident = identExpr.getName();
if (ident.equalsIgnoreCase("x")) {
String charValue = lexer.stringVal();
lexer.nextToken();
expr = new SQLHexExpr(charValue);
return primaryRest(expr);
// } else if (ident.equalsIgnoreCase("b")) {
// String charValue = lexer.stringVal();
// lexer.nextToken();
// expr = new SQLBinaryExpr(charValue);
//
// return primaryRest(expr);
} else if (ident.startsWith("_")) {
String charValue = lexer.stringVal();
lexer.nextToken();
MySqlCharExpr mysqlCharExpr = new MySqlCharExpr(charValue);
mysqlCharExpr.setCharset(identExpr.getName());
if (lexer.identifierEquals(FnvHash.Constants.COLLATE)) {
lexer.nextToken();
String collate = lexer.stringVal();
mysqlCharExpr.setCollate(collate);
accept(Token.IDENTIFIER);
}
expr = mysqlCharExpr;
return primaryRest(expr);
}
} else if (expr instanceof SQLCharExpr) {
String text2 = ((SQLCharExpr) expr).getText();
do {
String chars = lexer.stringVal();
text2 += chars;
lexer.nextToken();
} while (lexer.token() == Token.LITERAL_CHARS || lexer.token() == Token.LITERAL_ALIAS);
expr = new SQLCharExpr(text2);
} else if (expr instanceof SQLVariantRefExpr) {
SQLMethodInvokeExpr concat = new SQLMethodInvokeExpr("CONCAT");
concat.addArgument(expr);
concat.addArgument(this.primary());
expr = concat;
return primaryRest(expr);
}
} else if (lexer.token() == Token.IDENTIFIER) {
if (expr instanceof SQLHexExpr) {
if ("USING".equalsIgnoreCase(lexer.stringVal())) {
lexer.nextToken();
if (lexer.token() != Token.IDENTIFIER) {
throw new ParserException("syntax error, illegal hex. " + lexer.info());
}
String charSet = lexer.stringVal();
lexer.nextToken();
expr.getAttributes().put("USING", charSet);
return primaryRest(expr);
}
} else if (lexer.identifierEquals(FnvHash.Constants.COLLATE)) {
lexer.nextToken();
if (lexer.token() == Token.EQ) {
lexer.nextToken();
}
if (lexer.token() != Token.IDENTIFIER
&& lexer.token() != Token.LITERAL_CHARS) {
throw new ParserException("syntax error. " + lexer.info());
}
String collate = lexer.stringVal();
lexer.nextToken();
SQLBinaryOpExpr binaryExpr = new SQLBinaryOpExpr(expr, SQLBinaryOperator.COLLATE,
new SQLIdentifierExpr(collate), JdbcConstants.MYSQL);
expr = binaryExpr;
return primaryRest(expr);
} else if (expr instanceof SQLVariantRefExpr) {
if (lexer.identifierEquals(FnvHash.Constants.COLLATE)) {
lexer.nextToken();
if (lexer.token() != Token.IDENTIFIER
&& lexer.token() != Token.LITERAL_CHARS) {
throw new ParserException("syntax error. " + lexer.info());
}
String collate = lexer.stringVal();
lexer.nextToken();
expr.putAttribute("COLLATE", collate);
return primaryRest(expr);
}
}
}
// if (lexer.token() == Token.LPAREN && expr instanceof SQLIdentifierExpr) {
// SQLIdentifierExpr identExpr = (SQLIdentifierExpr) expr;
// String ident = identExpr.getName();
//
// if ("POSITION".equalsIgnoreCase(ident)) {
// return parsePosition();
// }
// }
if (lexer.token() == Token.VARIANT && "@".equals(lexer.stringVal())) {
return userNameRest(expr);
}
if (lexer.token() == Token.ERROR) {
throw new ParserException("syntax error. " + lexer.info());
}
return super.primaryRest(expr);
}
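A hex literal such as x'...' exercises the first branch of primaryRest above: primary() returns the identifier x, and primaryRest then folds the following quoted characters into a SQLHexExpr. A quick sketch; the exact lexing of x'...' can differ between Druid versions, so treat the printed results as indicative:

import com.alibaba.druid.sql.ast.SQLExpr;
import com.alibaba.druid.sql.ast.expr.SQLHexExpr;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlExprParser;

public class HexDemo {
    public static void main(String[] args) {
        SQLExpr expr = new MySqlExprParser("x'4D7953514C'").expr();
        System.out.println(expr instanceof SQLHexExpr);      // true
        System.out.println(expr.getClass().getSimpleName()); // SQLHexExpr
    }
}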