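// Package lexer transforms source text into token.Token values. It works on
// raw bytes (not runes), keeps at most one byte of lookahead, and produces
// tokens lazily: each call to NextToken advances just far enough to hand
// back the next token.
//
// A minimal usage sketch (the driving loop belongs to the caller, not this
// package):
//
//	l := lexer.New("let five = 5;")
//	for tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {
//		fmt.Printf("%v %q\n", tok.Type, tok.Literal)
//	}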
package lexer

import "code.jmug.me/jmug/interpreter-in-go/pkg/token"

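// Lexer produces tokens from input on demand. It keeps two cursors into the
// string so that the current byte and one byte of lookahead are always
// available.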
type Lexer struct {
	input        string // source text being tokenized
	position     int    // index of the byte currently held in ch
	readPosition int    // index of the next byte to read (one past position)
	ch           byte   // byte under examination; 0 once input is exhausted
}

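// New returns a Lexer over input, primed by reading the first byte so that
// NextToken can start work immediately.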
func New(input string) *Lexer {
	l := &Lexer{input: input}
	l.readChar()
	return l
}

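// NextToken skips any whitespace and returns the next token in the input.
// Once the input is exhausted, every subsequent call returns a token.EOF
// token.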
func (l *Lexer) NextToken() token.Token {
	l.skipWhitespace()

	var tok token.Token
	switch l.ch {
	case '=':
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			literal := string(ch) + string(l.ch)
			tok.Type = token.EQ
			tok.Literal = literal
		} else {
			tok = newToken(token.ASSIGN, l.ch)
		}
	case '+':
		tok = newToken(token.PLUS, l.ch)
	case '-':
		tok = newToken(token.MINUS, l.ch)
	case '!':
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			literal := string(ch) + string(l.ch)
			tok.Type = token.NOT_EQ
			tok.Literal = literal
		} else {
			tok = newToken(token.BANG, l.ch)
		}
	case '*':
		tok = newToken(token.ASTERISK, l.ch)
	case '/':
		tok = newToken(token.SLASH, l.ch)
	case '<':
		tok = newToken(token.LT, l.ch)
	case '>':
		tok = newToken(token.GT, l.ch)
	case ',':
		tok = newToken(token.COMMA, l.ch)
	case ';':
		tok = newToken(token.SEMICOLON, l.ch)
	case ':':
		tok = newToken(token.COLON, l.ch)
	case '(':
		tok = newToken(token.LPAREN, l.ch)
	case ')':
		tok = newToken(token.RPAREN, l.ch)
	case '{':
		tok = newToken(token.LBRACE, l.ch)
	case '}':
		tok = newToken(token.RBRACE, l.ch)
	case '[':
		tok = newToken(token.LBRACKET, l.ch)
	case ']':
		tok = newToken(token.RBRACKET, l.ch)
	case '"':
		tok.Type = token.STRING
		tok.Literal = l.readString()
	case 0:
		tok.Literal = ""
		tok.Type = token.EOF
	default:
		if isLetter(l.ch) {
			tok.Literal = l.readIdentifier()
			tok.Type = token.LookupIdent(tok.Literal)
			// Don't let it fall through because readIdentifier calls readChar.
			return tok
		} else if isDigit(l.ch) {
			tok.Literal = l.readNumber()
			tok.Type = token.INT
			// Don't let it fall through because readNumber calls readChar.
			return tok
		} else {
			tok = newToken(token.ILLEGAL, l.ch)
		}
	}

	l.readChar()
	return tok
}

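// readChar advances the lexer one byte: l.ch receives the byte at
// l.readPosition (or 0 past the end of input), and both positions move up.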
func (l *Lexer) readChar() {
	if l.readPosition >= len(l.input) {
		l.ch = 0
	} else {
		l.ch = l.input[l.readPosition]
	}
	l.position = l.readPosition
	l.readPosition++
}

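// peekChar returns the upcoming byte without consuming it, or 0 at the end
// of input. This single byte of lookahead is what distinguishes "=" from
// "==" and "!" from "!=".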
func (l *Lexer) peekChar() byte {
	if l.readPosition >= len(l.input) {
		return 0
	}
	return l.input[l.readPosition]
}

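// readIdentifier consumes a maximal run of letters and returns it; keyword
// lookup is the caller's job (see token.LookupIdent).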
func (l *Lexer) readIdentifier() string {
	position := l.position
	for isLetter(l.ch) {
		l.readChar()
	}
	// Slicing until l.position instead of readPosition because the last read
	// char was not a letter.
	return l.input[position:l.position]
}

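// readNumber consumes a maximal run of digits and returns it. Only integer
// literals are recognized; there is no float, hex, or sign handling.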
func (l *Lexer) readNumber() string {
	position := l.position
	for isDigit(l.ch) {
		l.readChar()
	}
	// Slicing until l.position instead of readPosition because the last read
	// char was not a digit.
	return l.input[position:l.position]
}

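// readString consumes a double-quoted string and returns its contents.
// There is no escape-sequence handling, and an unterminated string simply
// ends at the end of input.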
func (l *Lexer) readString() string {
	// Don't include the quotes in the literal.
	position := l.position + 1
	l.readChar()
	for l.ch != '"' && l.ch != 0 {
		l.readChar()
	}
	return l.input[position:l.position]
}

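// skipWhitespace advances past spaces, tabs, newlines, and carriage
// returns, which are insignificant between tokens.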
func (l *Lexer) skipWhitespace() {
	for l.ch == ' ' || l.ch == '\t' || l.ch == '\n' || l.ch == '\r' {
		l.readChar()
	}
}

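// newToken builds a token whose literal is the single byte ch.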
func newToken(tokenType token.TokenType, ch byte) token.Token {
	return token.Token{Type: tokenType, Literal: string(ch)}
}

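// isLetter reports whether ch may appear in an identifier: ASCII letters
// and the underscore.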
func isLetter(ch byte) bool {
	return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') || ch == '_'
}

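// isDigit reports whether ch is an ASCII digit.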
func isDigit(ch byte) bool {
	return '0' <= ch && ch <= '9'
}