Download and add the "code" folder.

Signed-off-by: jmug <u.g.a.mariano@gmail.com>
Mariano Uvalle 2025-01-01 15:44:44 -08:00
parent 4d018b4ceb
commit 9aa807b9c2
55 changed files with 9090 additions and 0 deletions

waiig_code_1.7/01/.envrc Normal file

@@ -0,0 +1 @@
export GOPATH=$(pwd)


@@ -0,0 +1,3 @@
module monkey
go 1.14


@@ -0,0 +1,137 @@
package lexer
import "monkey/token"
type Lexer struct {
input string
position int // current position in input (points to current char)
readPosition int // current reading position in input (after current char)
ch byte // current char under examination
}
func New(input string) *Lexer {
l := &Lexer{input: input}
l.readChar()
return l
}
func (l *Lexer) NextToken() token.Token {
var tok token.Token
l.skipWhitespace()
switch l.ch {
case '=':
if l.peekChar() == '=' {
ch := l.ch
l.readChar()
literal := string(ch) + string(l.ch)
tok = token.Token{Type: token.EQ, Literal: literal}
} else {
tok = newToken(token.ASSIGN, l.ch)
}
case '+':
tok = newToken(token.PLUS, l.ch)
case '-':
tok = newToken(token.MINUS, l.ch)
case '!':
if l.peekChar() == '=' {
ch := l.ch
l.readChar()
literal := string(ch) + string(l.ch)
tok = token.Token{Type: token.NOT_EQ, Literal: literal}
} else {
tok = newToken(token.BANG, l.ch)
}
case '/':
tok = newToken(token.SLASH, l.ch)
case '*':
tok = newToken(token.ASTERISK, l.ch)
case '<':
tok = newToken(token.LT, l.ch)
case '>':
tok = newToken(token.GT, l.ch)
case ';':
tok = newToken(token.SEMICOLON, l.ch)
case ',':
tok = newToken(token.COMMA, l.ch)
case '{':
tok = newToken(token.LBRACE, l.ch)
case '}':
tok = newToken(token.RBRACE, l.ch)
case '(':
tok = newToken(token.LPAREN, l.ch)
case ')':
tok = newToken(token.RPAREN, l.ch)
case 0:
tok.Literal = ""
tok.Type = token.EOF
default:
if isLetter(l.ch) {
tok.Literal = l.readIdentifier()
tok.Type = token.LookupIdent(tok.Literal)
return tok
} else if isDigit(l.ch) {
tok.Type = token.INT
tok.Literal = l.readNumber()
return tok
} else {
tok = newToken(token.ILLEGAL, l.ch)
}
}
l.readChar()
return tok
}
func (l *Lexer) skipWhitespace() {
for l.ch == ' ' || l.ch == '\t' || l.ch == '\n' || l.ch == '\r' {
l.readChar()
}
}
func (l *Lexer) readChar() {
if l.readPosition >= len(l.input) {
l.ch = 0
} else {
l.ch = l.input[l.readPosition]
}
l.position = l.readPosition
l.readPosition += 1
}
func (l *Lexer) peekChar() byte {
if l.readPosition >= len(l.input) {
return 0
} else {
return l.input[l.readPosition]
}
}
func (l *Lexer) readIdentifier() string {
position := l.position
for isLetter(l.ch) {
l.readChar()
}
return l.input[position:l.position]
}
func (l *Lexer) readNumber() string {
position := l.position
for isDigit(l.ch) {
l.readChar()
}
return l.input[position:l.position]
}
func isLetter(ch byte) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_'
}
func isDigit(ch byte) bool {
return '0' <= ch && ch <= '9'
}
func newToken(tokenType token.TokenType, ch byte) token.Token {
return token.Token{Type: tokenType, Literal: string(ch)}
}
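
For orientation, a minimal sketch of how this lexer is driven: construct it with New and call NextToken until it returns token.EOF. This is an illustrative standalone program, not a file in the commit; it uses only the monkey/lexer and monkey/token packages defined here.

package main

import (
    "fmt"

    "monkey/lexer"
    "monkey/token"
)

func main() {
    // Feed one line of Monkey source to the lexer and print every token
    // until the lexer signals EOF.
    l := lexer.New("let answer = 5 * 2;")
    for tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {
        fmt.Printf("%+v\n", tok)
    }
}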


@@ -0,0 +1,126 @@
package lexer
import (
"testing"
"monkey/token"
)
func TestNextToken(t *testing.T) {
input := `let five = 5;
let ten = 10;
let add = fn(x, y) {
x + y;
};
let result = add(five, ten);
!-/*5;
5 < 10 > 5;
if (5 < 10) {
return true;
} else {
return false;
}
10 == 10;
10 != 9;
`
tests := []struct {
expectedType token.TokenType
expectedLiteral string
}{
{token.LET, "let"},
{token.IDENT, "five"},
{token.ASSIGN, "="},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "ten"},
{token.ASSIGN, "="},
{token.INT, "10"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "add"},
{token.ASSIGN, "="},
{token.FUNCTION, "fn"},
{token.LPAREN, "("},
{token.IDENT, "x"},
{token.COMMA, ","},
{token.IDENT, "y"},
{token.RPAREN, ")"},
{token.LBRACE, "{"},
{token.IDENT, "x"},
{token.PLUS, "+"},
{token.IDENT, "y"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "result"},
{token.ASSIGN, "="},
{token.IDENT, "add"},
{token.LPAREN, "("},
{token.IDENT, "five"},
{token.COMMA, ","},
{token.IDENT, "ten"},
{token.RPAREN, ")"},
{token.SEMICOLON, ";"},
{token.BANG, "!"},
{token.MINUS, "-"},
{token.SLASH, "/"},
{token.ASTERISK, "*"},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.INT, "5"},
{token.LT, "<"},
{token.INT, "10"},
{token.GT, ">"},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.IF, "if"},
{token.LPAREN, "("},
{token.INT, "5"},
{token.LT, "<"},
{token.INT, "10"},
{token.RPAREN, ")"},
{token.LBRACE, "{"},
{token.RETURN, "return"},
{token.TRUE, "true"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.ELSE, "else"},
{token.LBRACE, "{"},
{token.RETURN, "return"},
{token.FALSE, "false"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.INT, "10"},
{token.EQ, "=="},
{token.INT, "10"},
{token.SEMICOLON, ";"},
{token.INT, "10"},
{token.NOT_EQ, "!="},
{token.INT, "9"},
{token.SEMICOLON, ";"},
{token.EOF, ""},
}
l := New(input)
for i, tt := range tests {
tok := l.NextToken()
if tok.Type != tt.expectedType {
t.Fatalf("tests[%d] - tokentype wrong. expected=%q, got=%q",
i, tt.expectedType, tok.Type)
}
if tok.Literal != tt.expectedLiteral {
t.Fatalf("tests[%d] - literal wrong. expected=%q, got=%q",
i, tt.expectedLiteral, tok.Literal)
}
}
}


@@ -0,0 +1,19 @@
package main
import (
"fmt"
"monkey/repl"
"os"
"os/user"
)
func main() {
user, err := user.Current()
if err != nil {
panic(err)
}
fmt.Printf("Hello %s! This is the Monkey programming language!\n",
user.Username)
fmt.Printf("Feel free to type in commands\n")
repl.Start(os.Stdin, os.Stdout)
}


@@ -0,0 +1,30 @@
package repl
import (
"bufio"
"fmt"
"io"
"monkey/lexer"
"monkey/token"
)
const PROMPT = ">> "
func Start(in io.Reader, out io.Writer) {
scanner := bufio.NewScanner(in)
for {
fmt.Fprintf(out, PROMPT)
scanned := scanner.Scan()
if !scanned {
return
}
line := scanner.Text()
l := lexer.New(line)
for tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {
fmt.Fprintf(out, "%+v\n", tok)
}
}
}
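
A hedged sketch of what an interaction with this token-printing REPL looks like, derived from the %+v formatting of token.Token above (illustrative, not captured output):

>> let x = 5;
{Type:LET Literal:let}
{Type:IDENT Literal:x}
{Type:ASSIGN Literal:=}
{Type:INT Literal:5}
{Type:SEMICOLON Literal:;}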


@@ -0,0 +1,66 @@
package token
type TokenType string
const (
ILLEGAL = "ILLEGAL"
EOF = "EOF"
// Identifiers + literals
IDENT = "IDENT" // add, foobar, x, y, ...
INT = "INT" // 1343456
// Operators
ASSIGN = "="
PLUS = "+"
MINUS = "-"
BANG = "!"
ASTERISK = "*"
SLASH = "/"
LT = "<"
GT = ">"
EQ = "=="
NOT_EQ = "!="
// Delimiters
COMMA = ","
SEMICOLON = ";"
LPAREN = "("
RPAREN = ")"
LBRACE = "{"
RBRACE = "}"
// Keywords
FUNCTION = "FUNCTION"
LET = "LET"
TRUE = "TRUE"
FALSE = "FALSE"
IF = "IF"
ELSE = "ELSE"
RETURN = "RETURN"
)
type Token struct {
Type TokenType
Literal string
}
var keywords = map[string]TokenType{
"fn": FUNCTION,
"let": LET,
"true": TRUE,
"false": FALSE,
"if": IF,
"else": ELSE,
"return": RETURN,
}
func LookupIdent(ident string) TokenType {
if tok, ok := keywords[ident]; ok {
return tok
}
return IDENT
}
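
A small illustration of LookupIdent (not a file in the commit): reserved words resolve to their keyword token types, while any other identifier falls back to IDENT.

package main

import (
    "fmt"

    "monkey/token"
)

func main() {
    fmt.Println(token.LookupIdent("fn"))     // FUNCTION
    fmt.Println(token.LookupIdent("let"))    // LET
    fmt.Println(token.LookupIdent("foobar")) // IDENT
}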

waiig_code_1.7/02/.envrc Normal file

@@ -0,0 +1 @@
export GOPATH=$(pwd)


@@ -0,0 +1,266 @@
package ast
import (
"bytes"
"monkey/token"
"strings"
)
// The base Node interface
type Node interface {
TokenLiteral() string
String() string
}
// All statement nodes implement this
type Statement interface {
Node
statementNode()
}
// All expression nodes implement this
type Expression interface {
Node
expressionNode()
}
type Program struct {
Statements []Statement
}
func (p *Program) TokenLiteral() string {
if len(p.Statements) > 0 {
return p.Statements[0].TokenLiteral()
} else {
return ""
}
}
func (p *Program) String() string {
var out bytes.Buffer
for _, s := range p.Statements {
out.WriteString(s.String())
}
return out.String()
}
// Statements
type LetStatement struct {
Token token.Token // the token.LET token
Name *Identifier
Value Expression
}
func (ls *LetStatement) statementNode() {}
func (ls *LetStatement) TokenLiteral() string { return ls.Token.Literal }
func (ls *LetStatement) String() string {
var out bytes.Buffer
out.WriteString(ls.TokenLiteral() + " ")
out.WriteString(ls.Name.String())
out.WriteString(" = ")
if ls.Value != nil {
out.WriteString(ls.Value.String())
}
out.WriteString(";")
return out.String()
}
type ReturnStatement struct {
Token token.Token // the 'return' token
ReturnValue Expression
}
func (rs *ReturnStatement) statementNode() {}
func (rs *ReturnStatement) TokenLiteral() string { return rs.Token.Literal }
func (rs *ReturnStatement) String() string {
var out bytes.Buffer
out.WriteString(rs.TokenLiteral() + " ")
if rs.ReturnValue != nil {
out.WriteString(rs.ReturnValue.String())
}
out.WriteString(";")
return out.String()
}
type ExpressionStatement struct {
Token token.Token // the first token of the expression
Expression Expression
}
func (es *ExpressionStatement) statementNode() {}
func (es *ExpressionStatement) TokenLiteral() string { return es.Token.Literal }
func (es *ExpressionStatement) String() string {
if es.Expression != nil {
return es.Expression.String()
}
return ""
}
type BlockStatement struct {
Token token.Token // the { token
Statements []Statement
}
func (bs *BlockStatement) statementNode() {}
func (bs *BlockStatement) TokenLiteral() string { return bs.Token.Literal }
func (bs *BlockStatement) String() string {
var out bytes.Buffer
for _, s := range bs.Statements {
out.WriteString(s.String())
}
return out.String()
}
// Expressions
type Identifier struct {
Token token.Token // the token.IDENT token
Value string
}
func (i *Identifier) expressionNode() {}
func (i *Identifier) TokenLiteral() string { return i.Token.Literal }
func (i *Identifier) String() string { return i.Value }
type Boolean struct {
Token token.Token
Value bool
}
func (b *Boolean) expressionNode() {}
func (b *Boolean) TokenLiteral() string { return b.Token.Literal }
func (b *Boolean) String() string { return b.Token.Literal }
type IntegerLiteral struct {
Token token.Token
Value int64
}
func (il *IntegerLiteral) expressionNode() {}
func (il *IntegerLiteral) TokenLiteral() string { return il.Token.Literal }
func (il *IntegerLiteral) String() string { return il.Token.Literal }
type PrefixExpression struct {
Token token.Token // The prefix token, e.g. !
Operator string
Right Expression
}
func (pe *PrefixExpression) expressionNode() {}
func (pe *PrefixExpression) TokenLiteral() string { return pe.Token.Literal }
func (pe *PrefixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(pe.Operator)
out.WriteString(pe.Right.String())
out.WriteString(")")
return out.String()
}
type InfixExpression struct {
Token token.Token // The operator token, e.g. +
Left Expression
Operator string
Right Expression
}
func (ie *InfixExpression) expressionNode() {}
func (ie *InfixExpression) TokenLiteral() string { return ie.Token.Literal }
func (ie *InfixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(ie.Left.String())
out.WriteString(" " + ie.Operator + " ")
out.WriteString(ie.Right.String())
out.WriteString(")")
return out.String()
}
type IfExpression struct {
Token token.Token // The 'if' token
Condition Expression
Consequence *BlockStatement
Alternative *BlockStatement
}
func (ie *IfExpression) expressionNode() {}
func (ie *IfExpression) TokenLiteral() string { return ie.Token.Literal }
func (ie *IfExpression) String() string {
var out bytes.Buffer
out.WriteString("if")
out.WriteString(ie.Condition.String())
out.WriteString(" ")
out.WriteString(ie.Consequence.String())
if ie.Alternative != nil {
out.WriteString("else ")
out.WriteString(ie.Alternative.String())
}
return out.String()
}
type FunctionLiteral struct {
Token token.Token // The 'fn' token
Parameters []*Identifier
Body *BlockStatement
}
func (fl *FunctionLiteral) expressionNode() {}
func (fl *FunctionLiteral) TokenLiteral() string { return fl.Token.Literal }
func (fl *FunctionLiteral) String() string {
var out bytes.Buffer
params := []string{}
for _, p := range fl.Parameters {
params = append(params, p.String())
}
out.WriteString(fl.TokenLiteral())
out.WriteString("(")
out.WriteString(strings.Join(params, ", "))
out.WriteString(") ")
out.WriteString(fl.Body.String())
return out.String()
}
type CallExpression struct {
Token token.Token // The '(' token
Function Expression // Identifier or FunctionLiteral
Arguments []Expression
}
func (ce *CallExpression) expressionNode() {}
func (ce *CallExpression) TokenLiteral() string { return ce.Token.Literal }
func (ce *CallExpression) String() string {
var out bytes.Buffer
args := []string{}
for _, a := range ce.Arguments {
args = append(args, a.String())
}
out.WriteString(ce.Function.String())
out.WriteString("(")
out.WriteString(strings.Join(args, ", "))
out.WriteString(")")
return out.String()
}


@@ -0,0 +1,28 @@
package ast
import (
"monkey/token"
"testing"
)
func TestString(t *testing.T) {
program := &Program{
Statements: []Statement{
&LetStatement{
Token: token.Token{Type: token.LET, Literal: "let"},
Name: &Identifier{
Token: token.Token{Type: token.IDENT, Literal: "myVar"},
Value: "myVar",
},
Value: &Identifier{
Token: token.Token{Type: token.IDENT, Literal: "anotherVar"},
Value: "anotherVar",
},
},
},
}
if program.String() != "let myVar = anotherVar;" {
t.Errorf("program.String() wrong. got=%q", program.String())
}
}


@@ -0,0 +1,3 @@
module monkey
go 1.14


@@ -0,0 +1,137 @@
package lexer
import "monkey/token"
type Lexer struct {
input string
position int // current position in input (points to current char)
readPosition int // current reading position in input (after current char)
ch byte // current char under examination
}
func New(input string) *Lexer {
l := &Lexer{input: input}
l.readChar()
return l
}
func (l *Lexer) NextToken() token.Token {
var tok token.Token
l.skipWhitespace()
switch l.ch {
case '=':
if l.peekChar() == '=' {
ch := l.ch
l.readChar()
literal := string(ch) + string(l.ch)
tok = token.Token{Type: token.EQ, Literal: literal}
} else {
tok = newToken(token.ASSIGN, l.ch)
}
case '+':
tok = newToken(token.PLUS, l.ch)
case '-':
tok = newToken(token.MINUS, l.ch)
case '!':
if l.peekChar() == '=' {
ch := l.ch
l.readChar()
literal := string(ch) + string(l.ch)
tok = token.Token{Type: token.NOT_EQ, Literal: literal}
} else {
tok = newToken(token.BANG, l.ch)
}
case '/':
tok = newToken(token.SLASH, l.ch)
case '*':
tok = newToken(token.ASTERISK, l.ch)
case '<':
tok = newToken(token.LT, l.ch)
case '>':
tok = newToken(token.GT, l.ch)
case ';':
tok = newToken(token.SEMICOLON, l.ch)
case ',':
tok = newToken(token.COMMA, l.ch)
case '{':
tok = newToken(token.LBRACE, l.ch)
case '}':
tok = newToken(token.RBRACE, l.ch)
case '(':
tok = newToken(token.LPAREN, l.ch)
case ')':
tok = newToken(token.RPAREN, l.ch)
case 0:
tok.Literal = ""
tok.Type = token.EOF
default:
if isLetter(l.ch) {
tok.Literal = l.readIdentifier()
tok.Type = token.LookupIdent(tok.Literal)
return tok
} else if isDigit(l.ch) {
tok.Type = token.INT
tok.Literal = l.readNumber()
return tok
} else {
tok = newToken(token.ILLEGAL, l.ch)
}
}
l.readChar()
return tok
}
func (l *Lexer) skipWhitespace() {
for l.ch == ' ' || l.ch == '\t' || l.ch == '\n' || l.ch == '\r' {
l.readChar()
}
}
func (l *Lexer) readChar() {
if l.readPosition >= len(l.input) {
l.ch = 0
} else {
l.ch = l.input[l.readPosition]
}
l.position = l.readPosition
l.readPosition += 1
}
func (l *Lexer) peekChar() byte {
if l.readPosition >= len(l.input) {
return 0
} else {
return l.input[l.readPosition]
}
}
func (l *Lexer) readIdentifier() string {
position := l.position
for isLetter(l.ch) {
l.readChar()
}
return l.input[position:l.position]
}
func (l *Lexer) readNumber() string {
position := l.position
for isDigit(l.ch) {
l.readChar()
}
return l.input[position:l.position]
}
func isLetter(ch byte) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_'
}
func isDigit(ch byte) bool {
return '0' <= ch && ch <= '9'
}
func newToken(tokenType token.TokenType, ch byte) token.Token {
return token.Token{Type: tokenType, Literal: string(ch)}
}


@@ -0,0 +1,126 @@
package lexer
import (
"testing"
"monkey/token"
)
func TestNextToken(t *testing.T) {
input := `let five = 5;
let ten = 10;
let add = fn(x, y) {
x + y;
};
let result = add(five, ten);
!-/*5;
5 < 10 > 5;
if (5 < 10) {
return true;
} else {
return false;
}
10 == 10;
10 != 9;
`
tests := []struct {
expectedType token.TokenType
expectedLiteral string
}{
{token.LET, "let"},
{token.IDENT, "five"},
{token.ASSIGN, "="},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "ten"},
{token.ASSIGN, "="},
{token.INT, "10"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "add"},
{token.ASSIGN, "="},
{token.FUNCTION, "fn"},
{token.LPAREN, "("},
{token.IDENT, "x"},
{token.COMMA, ","},
{token.IDENT, "y"},
{token.RPAREN, ")"},
{token.LBRACE, "{"},
{token.IDENT, "x"},
{token.PLUS, "+"},
{token.IDENT, "y"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "result"},
{token.ASSIGN, "="},
{token.IDENT, "add"},
{token.LPAREN, "("},
{token.IDENT, "five"},
{token.COMMA, ","},
{token.IDENT, "ten"},
{token.RPAREN, ")"},
{token.SEMICOLON, ";"},
{token.BANG, "!"},
{token.MINUS, "-"},
{token.SLASH, "/"},
{token.ASTERISK, "*"},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.INT, "5"},
{token.LT, "<"},
{token.INT, "10"},
{token.GT, ">"},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.IF, "if"},
{token.LPAREN, "("},
{token.INT, "5"},
{token.LT, "<"},
{token.INT, "10"},
{token.RPAREN, ")"},
{token.LBRACE, "{"},
{token.RETURN, "return"},
{token.TRUE, "true"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.ELSE, "else"},
{token.LBRACE, "{"},
{token.RETURN, "return"},
{token.FALSE, "false"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.INT, "10"},
{token.EQ, "=="},
{token.INT, "10"},
{token.SEMICOLON, ";"},
{token.INT, "10"},
{token.NOT_EQ, "!="},
{token.INT, "9"},
{token.SEMICOLON, ";"},
{token.EOF, ""},
}
l := New(input)
for i, tt := range tests {
tok := l.NextToken()
if tok.Type != tt.expectedType {
t.Fatalf("tests[%d] - tokentype wrong. expected=%q, got=%q",
i, tt.expectedType, tok.Type)
}
if tok.Literal != tt.expectedLiteral {
t.Fatalf("tests[%d] - literal wrong. expected=%q, got=%q",
i, tt.expectedLiteral, tok.Literal)
}
}
}


@@ -0,0 +1,19 @@
package main
import (
"fmt"
"monkey/repl"
"os"
"os/user"
)
func main() {
user, err := user.Current()
if err != nil {
panic(err)
}
fmt.Printf("Hello %s! This is the Monkey programming language!\n",
user.Username)
fmt.Printf("Feel free to type in commands\n")
repl.Start(os.Stdin, os.Stdout)
}


@@ -0,0 +1,431 @@
package parser
import (
"fmt"
"monkey/ast"
"monkey/lexer"
"monkey/token"
"strconv"
)
const (
_ int = iota
LOWEST
EQUALS // ==
LESSGREATER // > or <
SUM // +
PRODUCT // *
PREFIX // -X or !X
CALL // myFunction(X)
)
var precedences = map[token.TokenType]int{
token.EQ: EQUALS,
token.NOT_EQ: EQUALS,
token.LT: LESSGREATER,
token.GT: LESSGREATER,
token.PLUS: SUM,
token.MINUS: SUM,
token.SLASH: PRODUCT,
token.ASTERISK: PRODUCT,
token.LPAREN: CALL,
}
type (
prefixParseFn func() ast.Expression
infixParseFn func(ast.Expression) ast.Expression
)
type Parser struct {
l *lexer.Lexer
errors []string
curToken token.Token
peekToken token.Token
prefixParseFns map[token.TokenType]prefixParseFn
infixParseFns map[token.TokenType]infixParseFn
}
func New(l *lexer.Lexer) *Parser {
p := &Parser{
l: l,
errors: []string{},
}
p.prefixParseFns = make(map[token.TokenType]prefixParseFn)
p.registerPrefix(token.IDENT, p.parseIdentifier)
p.registerPrefix(token.INT, p.parseIntegerLiteral)
p.registerPrefix(token.BANG, p.parsePrefixExpression)
p.registerPrefix(token.MINUS, p.parsePrefixExpression)
p.registerPrefix(token.TRUE, p.parseBoolean)
p.registerPrefix(token.FALSE, p.parseBoolean)
p.registerPrefix(token.LPAREN, p.parseGroupedExpression)
p.registerPrefix(token.IF, p.parseIfExpression)
p.registerPrefix(token.FUNCTION, p.parseFunctionLiteral)
p.infixParseFns = make(map[token.TokenType]infixParseFn)
p.registerInfix(token.PLUS, p.parseInfixExpression)
p.registerInfix(token.MINUS, p.parseInfixExpression)
p.registerInfix(token.SLASH, p.parseInfixExpression)
p.registerInfix(token.ASTERISK, p.parseInfixExpression)
p.registerInfix(token.EQ, p.parseInfixExpression)
p.registerInfix(token.NOT_EQ, p.parseInfixExpression)
p.registerInfix(token.LT, p.parseInfixExpression)
p.registerInfix(token.GT, p.parseInfixExpression)
p.registerInfix(token.LPAREN, p.parseCallExpression)
// Read two tokens, so curToken and peekToken are both set
p.nextToken()
p.nextToken()
return p
}
func (p *Parser) nextToken() {
p.curToken = p.peekToken
p.peekToken = p.l.NextToken()
}
func (p *Parser) curTokenIs(t token.TokenType) bool {
return p.curToken.Type == t
}
func (p *Parser) peekTokenIs(t token.TokenType) bool {
return p.peekToken.Type == t
}
func (p *Parser) expectPeek(t token.TokenType) bool {
if p.peekTokenIs(t) {
p.nextToken()
return true
} else {
p.peekError(t)
return false
}
}
func (p *Parser) Errors() []string {
return p.errors
}
func (p *Parser) peekError(t token.TokenType) {
msg := fmt.Sprintf("expected next token to be %s, got %s instead",
t, p.peekToken.Type)
p.errors = append(p.errors, msg)
}
func (p *Parser) noPrefixParseFnError(t token.TokenType) {
msg := fmt.Sprintf("no prefix parse function for %s found", t)
p.errors = append(p.errors, msg)
}
func (p *Parser) ParseProgram() *ast.Program {
program := &ast.Program{}
program.Statements = []ast.Statement{}
for !p.curTokenIs(token.EOF) {
stmt := p.parseStatement()
if stmt != nil {
program.Statements = append(program.Statements, stmt)
}
p.nextToken()
}
return program
}
func (p *Parser) parseStatement() ast.Statement {
switch p.curToken.Type {
case token.LET:
return p.parseLetStatement()
case token.RETURN:
return p.parseReturnStatement()
default:
return p.parseExpressionStatement()
}
}
func (p *Parser) parseLetStatement() *ast.LetStatement {
stmt := &ast.LetStatement{Token: p.curToken}
if !p.expectPeek(token.IDENT) {
return nil
}
stmt.Name = &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
if !p.expectPeek(token.ASSIGN) {
return nil
}
p.nextToken()
stmt.Value = p.parseExpression(LOWEST)
if p.peekTokenIs(token.SEMICOLON) {
p.nextToken()
}
return stmt
}
func (p *Parser) parseReturnStatement() *ast.ReturnStatement {
stmt := &ast.ReturnStatement{Token: p.curToken}
p.nextToken()
stmt.ReturnValue = p.parseExpression(LOWEST)
if p.peekTokenIs(token.SEMICOLON) {
p.nextToken()
}
return stmt
}
func (p *Parser) parseExpressionStatement() *ast.ExpressionStatement {
stmt := &ast.ExpressionStatement{Token: p.curToken}
stmt.Expression = p.parseExpression(LOWEST)
if p.peekTokenIs(token.SEMICOLON) {
p.nextToken()
}
return stmt
}
func (p *Parser) parseExpression(precedence int) ast.Expression {
prefix := p.prefixParseFns[p.curToken.Type]
if prefix == nil {
p.noPrefixParseFnError(p.curToken.Type)
return nil
}
leftExp := prefix()
for !p.peekTokenIs(token.SEMICOLON) && precedence < p.peekPrecedence() {
infix := p.infixParseFns[p.peekToken.Type]
if infix == nil {
return leftExp
}
p.nextToken()
leftExp = infix(leftExp)
}
return leftExp
}
func (p *Parser) peekPrecedence() int {
if p, ok := precedences[p.peekToken.Type]; ok {
return p
}
return LOWEST
}
func (p *Parser) curPrecedence() int {
if p, ok := precedences[p.curToken.Type]; ok {
return p
}
return LOWEST
}
func (p *Parser) parseIdentifier() ast.Expression {
return &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
}
func (p *Parser) parseIntegerLiteral() ast.Expression {
lit := &ast.IntegerLiteral{Token: p.curToken}
value, err := strconv.ParseInt(p.curToken.Literal, 0, 64)
if err != nil {
msg := fmt.Sprintf("could not parse %q as integer", p.curToken.Literal)
p.errors = append(p.errors, msg)
return nil
}
lit.Value = value
return lit
}
func (p *Parser) parsePrefixExpression() ast.Expression {
expression := &ast.PrefixExpression{
Token: p.curToken,
Operator: p.curToken.Literal,
}
p.nextToken()
expression.Right = p.parseExpression(PREFIX)
return expression
}
func (p *Parser) parseInfixExpression(left ast.Expression) ast.Expression {
expression := &ast.InfixExpression{
Token: p.curToken,
Operator: p.curToken.Literal,
Left: left,
}
precedence := p.curPrecedence()
p.nextToken()
expression.Right = p.parseExpression(precedence)
return expression
}
func (p *Parser) parseBoolean() ast.Expression {
return &ast.Boolean{Token: p.curToken, Value: p.curTokenIs(token.TRUE)}
}
func (p *Parser) parseGroupedExpression() ast.Expression {
p.nextToken()
exp := p.parseExpression(LOWEST)
if !p.expectPeek(token.RPAREN) {
return nil
}
return exp
}
func (p *Parser) parseIfExpression() ast.Expression {
expression := &ast.IfExpression{Token: p.curToken}
if !p.expectPeek(token.LPAREN) {
return nil
}
p.nextToken()
expression.Condition = p.parseExpression(LOWEST)
if !p.expectPeek(token.RPAREN) {
return nil
}
if !p.expectPeek(token.LBRACE) {
return nil
}
expression.Consequence = p.parseBlockStatement()
if p.peekTokenIs(token.ELSE) {
p.nextToken()
if !p.expectPeek(token.LBRACE) {
return nil
}
expression.Alternative = p.parseBlockStatement()
}
return expression
}
func (p *Parser) parseBlockStatement() *ast.BlockStatement {
block := &ast.BlockStatement{Token: p.curToken}
block.Statements = []ast.Statement{}
p.nextToken()
for !p.curTokenIs(token.RBRACE) && !p.curTokenIs(token.EOF) {
stmt := p.parseStatement()
if stmt != nil {
block.Statements = append(block.Statements, stmt)
}
p.nextToken()
}
return block
}
func (p *Parser) parseFunctionLiteral() ast.Expression {
lit := &ast.FunctionLiteral{Token: p.curToken}
if !p.expectPeek(token.LPAREN) {
return nil
}
lit.Parameters = p.parseFunctionParameters()
if !p.expectPeek(token.LBRACE) {
return nil
}
lit.Body = p.parseBlockStatement()
return lit
}
func (p *Parser) parseFunctionParameters() []*ast.Identifier {
identifiers := []*ast.Identifier{}
if p.peekTokenIs(token.RPAREN) {
p.nextToken()
return identifiers
}
p.nextToken()
ident := &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
identifiers = append(identifiers, ident)
for p.peekTokenIs(token.COMMA) {
p.nextToken()
p.nextToken()
ident := &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
identifiers = append(identifiers, ident)
}
if !p.expectPeek(token.RPAREN) {
return nil
}
return identifiers
}
func (p *Parser) parseCallExpression(function ast.Expression) ast.Expression {
exp := &ast.CallExpression{Token: p.curToken, Function: function}
exp.Arguments = p.parseCallArguments()
return exp
}
func (p *Parser) parseCallArguments() []ast.Expression {
args := []ast.Expression{}
if p.peekTokenIs(token.RPAREN) {
p.nextToken()
return args
}
p.nextToken()
args = append(args, p.parseExpression(LOWEST))
for p.peekTokenIs(token.COMMA) {
p.nextToken()
p.nextToken()
args = append(args, p.parseExpression(LOWEST))
}
if !p.expectPeek(token.RPAREN) {
return nil
}
return args
}
func (p *Parser) registerPrefix(tokenType token.TokenType, fn prefixParseFn) {
p.prefixParseFns[tokenType] = fn
}
func (p *Parser) registerInfix(tokenType token.TokenType, fn infixParseFn) {
p.infixParseFns[tokenType] = fn
}
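
For orientation, a minimal sketch of running this Pratt parser end to end, mirroring what the tests below do: lex, parse, check the collected Errors, then print the AST via String. Illustrative only, not a file in the commit.

package main

import (
    "fmt"

    "monkey/lexer"
    "monkey/parser"
)

func main() {
    l := lexer.New("let result = add(five, ten) * 2;")
    p := parser.New(l)
    program := p.ParseProgram()
    // The parser records problems instead of returning an error value.
    if len(p.Errors()) != 0 {
        for _, msg := range p.Errors() {
            fmt.Println("parser error:", msg)
        }
        return
    }
    // String() reproduces the program with explicit grouping:
    // let result = (add(five, ten) * 2);
    fmt.Println(program.String())
}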


@@ -0,0 +1,817 @@
package parser
import (
"fmt"
"monkey/ast"
"monkey/lexer"
"testing"
)
func TestLetStatements(t *testing.T) {
tests := []struct {
input string
expectedIdentifier string
expectedValue interface{}
}{
{"let x = 5;", "x", 5},
{"let y = true;", "y", true},
{"let foobar = y;", "foobar", "y"},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain 1 statements. got=%d",
len(program.Statements))
}
stmt := program.Statements[0]
if !testLetStatement(t, stmt, tt.expectedIdentifier) {
return
}
val := stmt.(*ast.LetStatement).Value
if !testLiteralExpression(t, val, tt.expectedValue) {
return
}
}
}
func TestReturnStatements(t *testing.T) {
tests := []struct {
input string
expectedValue interface{}
}{
{"return 5;", 5},
{"return true;", true},
{"return foobar;", "foobar"},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain 1 statements. got=%d",
len(program.Statements))
}
stmt := program.Statements[0]
returnStmt, ok := stmt.(*ast.ReturnStatement)
if !ok {
t.Fatalf("stmt not *ast.ReturnStatement. got=%T", stmt)
}
if returnStmt.TokenLiteral() != "return" {
t.Fatalf("returnStmt.TokenLiteral not 'return', got %q",
returnStmt.TokenLiteral())
}
if testLiteralExpression(t, returnStmt.ReturnValue, tt.expectedValue) {
return
}
}
}
func TestIdentifierExpression(t *testing.T) {
input := "foobar;"
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program has not enough statements. got=%d",
len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
ident, ok := stmt.Expression.(*ast.Identifier)
if !ok {
t.Fatalf("exp not *ast.Identifier. got=%T", stmt.Expression)
}
if ident.Value != "foobar" {
t.Errorf("ident.Value not %s. got=%s", "foobar", ident.Value)
}
if ident.TokenLiteral() != "foobar" {
t.Errorf("ident.TokenLiteral not %s. got=%s", "foobar",
ident.TokenLiteral())
}
}
func TestIntegerLiteralExpression(t *testing.T) {
input := "5;"
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program has not enough statements. got=%d",
len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
literal, ok := stmt.Expression.(*ast.IntegerLiteral)
if !ok {
t.Fatalf("exp not *ast.IntegerLiteral. got=%T", stmt.Expression)
}
if literal.Value != 5 {
t.Errorf("literal.Value not %d. got=%d", 5, literal.Value)
}
if literal.TokenLiteral() != "5" {
t.Errorf("literal.TokenLiteral not %s. got=%s", "5",
literal.TokenLiteral())
}
}
func TestParsingPrefixExpressions(t *testing.T) {
prefixTests := []struct {
input string
operator string
value interface{}
}{
{"!5;", "!", 5},
{"-15;", "-", 15},
{"!foobar;", "!", "foobar"},
{"-foobar;", "-", "foobar"},
{"!true;", "!", true},
{"!false;", "!", false},
}
for _, tt := range prefixTests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
exp, ok := stmt.Expression.(*ast.PrefixExpression)
if !ok {
t.Fatalf("stmt is not ast.PrefixExpression. got=%T", stmt.Expression)
}
if exp.Operator != tt.operator {
t.Fatalf("exp.Operator is not '%s'. got=%s",
tt.operator, exp.Operator)
}
if !testLiteralExpression(t, exp.Right, tt.value) {
return
}
}
}
func TestParsingInfixExpressions(t *testing.T) {
infixTests := []struct {
input string
leftValue interface{}
operator string
rightValue interface{}
}{
{"5 + 5;", 5, "+", 5},
{"5 - 5;", 5, "-", 5},
{"5 * 5;", 5, "*", 5},
{"5 / 5;", 5, "/", 5},
{"5 > 5;", 5, ">", 5},
{"5 < 5;", 5, "<", 5},
{"5 == 5;", 5, "==", 5},
{"5 != 5;", 5, "!=", 5},
{"foobar + barfoo;", "foobar", "+", "barfoo"},
{"foobar - barfoo;", "foobar", "-", "barfoo"},
{"foobar * barfoo;", "foobar", "*", "barfoo"},
{"foobar / barfoo;", "foobar", "/", "barfoo"},
{"foobar > barfoo;", "foobar", ">", "barfoo"},
{"foobar < barfoo;", "foobar", "<", "barfoo"},
{"foobar == barfoo;", "foobar", "==", "barfoo"},
{"foobar != barfoo;", "foobar", "!=", "barfoo"},
{"true == true", true, "==", true},
{"true != false", true, "!=", false},
{"false == false", false, "==", false},
}
for _, tt := range infixTests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
if !testInfixExpression(t, stmt.Expression, tt.leftValue,
tt.operator, tt.rightValue) {
return
}
}
}
func TestOperatorPrecedenceParsing(t *testing.T) {
tests := []struct {
input string
expected string
}{
{
"-a * b",
"((-a) * b)",
},
{
"!-a",
"(!(-a))",
},
{
"a + b + c",
"((a + b) + c)",
},
{
"a + b - c",
"((a + b) - c)",
},
{
"a * b * c",
"((a * b) * c)",
},
{
"a * b / c",
"((a * b) / c)",
},
{
"a + b / c",
"(a + (b / c))",
},
{
"a + b * c + d / e - f",
"(((a + (b * c)) + (d / e)) - f)",
},
{
"3 + 4; -5 * 5",
"(3 + 4)((-5) * 5)",
},
{
"5 > 4 == 3 < 4",
"((5 > 4) == (3 < 4))",
},
{
"5 < 4 != 3 > 4",
"((5 < 4) != (3 > 4))",
},
{
"3 + 4 * 5 == 3 * 1 + 4 * 5",
"((3 + (4 * 5)) == ((3 * 1) + (4 * 5)))",
},
{
"true",
"true",
},
{
"false",
"false",
},
{
"3 > 5 == false",
"((3 > 5) == false)",
},
{
"3 < 5 == true",
"((3 < 5) == true)",
},
{
"1 + (2 + 3) + 4",
"((1 + (2 + 3)) + 4)",
},
{
"(5 + 5) * 2",
"((5 + 5) * 2)",
},
{
"2 / (5 + 5)",
"(2 / (5 + 5))",
},
{
"(5 + 5) * 2 * (5 + 5)",
"(((5 + 5) * 2) * (5 + 5))",
},
{
"-(5 + 5)",
"(-(5 + 5))",
},
{
"!(true == true)",
"(!(true == true))",
},
{
"a + add(b * c) + d",
"((a + add((b * c))) + d)",
},
{
"add(a, b, 1, 2 * 3, 4 + 5, add(6, 7 * 8))",
"add(a, b, 1, (2 * 3), (4 + 5), add(6, (7 * 8)))",
},
{
"add(a + b + c * d / f + g)",
"add((((a + b) + ((c * d) / f)) + g))",
},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
actual := program.String()
if actual != tt.expected {
t.Errorf("expected=%q, got=%q", tt.expected, actual)
}
}
}
func TestBooleanExpression(t *testing.T) {
tests := []struct {
input string
expectedBoolean bool
}{
{"true;", true},
{"false;", false},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program has not enough statements. got=%d",
len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
boolean, ok := stmt.Expression.(*ast.Boolean)
if !ok {
t.Fatalf("exp not *ast.Boolean. got=%T", stmt.Expression)
}
if boolean.Value != tt.expectedBoolean {
t.Errorf("boolean.Value not %t. got=%t", tt.expectedBoolean,
boolean.Value)
}
}
}
func TestIfExpression(t *testing.T) {
input := `if (x < y) { x }`
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
exp, ok := stmt.Expression.(*ast.IfExpression)
if !ok {
t.Fatalf("stmt.Expression is not ast.IfExpression. got=%T",
stmt.Expression)
}
if !testInfixExpression(t, exp.Condition, "x", "<", "y") {
return
}
if len(exp.Consequence.Statements) != 1 {
t.Errorf("consequence is not 1 statements. got=%d\n",
len(exp.Consequence.Statements))
}
consequence, ok := exp.Consequence.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("Statements[0] is not ast.ExpressionStatement. got=%T",
exp.Consequence.Statements[0])
}
if !testIdentifier(t, consequence.Expression, "x") {
return
}
if exp.Alternative != nil {
t.Errorf("exp.Alternative.Statements was not nil. got=%+v", exp.Alternative)
}
}
func TestIfElseExpression(t *testing.T) {
input := `if (x < y) { x } else { y }`
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
exp, ok := stmt.Expression.(*ast.IfExpression)
if !ok {
t.Fatalf("stmt.Expression is not ast.IfExpression. got=%T", stmt.Expression)
}
if !testInfixExpression(t, exp.Condition, "x", "<", "y") {
return
}
if len(exp.Consequence.Statements) != 1 {
t.Errorf("consequence is not 1 statements. got=%d\n",
len(exp.Consequence.Statements))
}
consequence, ok := exp.Consequence.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("Statements[0] is not ast.ExpressionStatement. got=%T",
exp.Consequence.Statements[0])
}
if !testIdentifier(t, consequence.Expression, "x") {
return
}
if len(exp.Alternative.Statements) != 1 {
t.Errorf("exp.Alternative.Statements does not contain 1 statements. got=%d\n",
len(exp.Alternative.Statements))
}
alternative, ok := exp.Alternative.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("Statements[0] is not ast.ExpressionStatement. got=%T",
exp.Alternative.Statements[0])
}
if !testIdentifier(t, alternative.Expression, "y") {
return
}
}
func TestFunctionLiteralParsing(t *testing.T) {
input := `fn(x, y) { x + y; }`
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
function, ok := stmt.Expression.(*ast.FunctionLiteral)
if !ok {
t.Fatalf("stmt.Expression is not ast.FunctionLiteral. got=%T",
stmt.Expression)
}
if len(function.Parameters) != 2 {
t.Fatalf("function literal parameters wrong. want 2, got=%d\n",
len(function.Parameters))
}
testLiteralExpression(t, function.Parameters[0], "x")
testLiteralExpression(t, function.Parameters[1], "y")
if len(function.Body.Statements) != 1 {
t.Fatalf("function.Body.Statements has not 1 statements. got=%d\n",
len(function.Body.Statements))
}
bodyStmt, ok := function.Body.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("function body stmt is not ast.ExpressionStatement. got=%T",
function.Body.Statements[0])
}
testInfixExpression(t, bodyStmt.Expression, "x", "+", "y")
}
func TestFunctionParameterParsing(t *testing.T) {
tests := []struct {
input string
expectedParams []string
}{
{input: "fn() {};", expectedParams: []string{}},
{input: "fn(x) {};", expectedParams: []string{"x"}},
{input: "fn(x, y, z) {};", expectedParams: []string{"x", "y", "z"}},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
stmt := program.Statements[0].(*ast.ExpressionStatement)
function := stmt.Expression.(*ast.FunctionLiteral)
if len(function.Parameters) != len(tt.expectedParams) {
t.Errorf("length parameters wrong. want %d, got=%d\n",
len(tt.expectedParams), len(function.Parameters))
}
for i, ident := range tt.expectedParams {
testLiteralExpression(t, function.Parameters[i], ident)
}
}
}
func TestCallExpressionParsing(t *testing.T) {
input := "add(1, 2 * 3, 4 + 5);"
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("stmt is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
exp, ok := stmt.Expression.(*ast.CallExpression)
if !ok {
t.Fatalf("stmt.Expression is not ast.CallExpression. got=%T",
stmt.Expression)
}
if !testIdentifier(t, exp.Function, "add") {
return
}
if len(exp.Arguments) != 3 {
t.Fatalf("wrong length of arguments. got=%d", len(exp.Arguments))
}
testLiteralExpression(t, exp.Arguments[0], 1)
testInfixExpression(t, exp.Arguments[1], 2, "*", 3)
testInfixExpression(t, exp.Arguments[2], 4, "+", 5)
}
func TestCallExpressionParameterParsing(t *testing.T) {
tests := []struct {
input string
expectedIdent string
expectedArgs []string
}{
{
input: "add();",
expectedIdent: "add",
expectedArgs: []string{},
},
{
input: "add(1);",
expectedIdent: "add",
expectedArgs: []string{"1"},
},
{
input: "add(1, 2 * 3, 4 + 5);",
expectedIdent: "add",
expectedArgs: []string{"1", "(2 * 3)", "(4 + 5)"},
},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
stmt := program.Statements[0].(*ast.ExpressionStatement)
exp, ok := stmt.Expression.(*ast.CallExpression)
if !ok {
t.Fatalf("stmt.Expression is not ast.CallExpression. got=%T",
stmt.Expression)
}
if !testIdentifier(t, exp.Function, tt.expectedIdent) {
return
}
if len(exp.Arguments) != len(tt.expectedArgs) {
t.Fatalf("wrong number of arguments. want=%d, got=%d",
len(tt.expectedArgs), len(exp.Arguments))
}
for i, arg := range tt.expectedArgs {
if exp.Arguments[i].String() != arg {
t.Errorf("argument %d wrong. want=%q, got=%q", i,
arg, exp.Arguments[i].String())
}
}
}
}
func testLetStatement(t *testing.T, s ast.Statement, name string) bool {
if s.TokenLiteral() != "let" {
t.Errorf("s.TokenLiteral not 'let'. got=%q", s.TokenLiteral())
return false
}
letStmt, ok := s.(*ast.LetStatement)
if !ok {
t.Errorf("s not *ast.LetStatement. got=%T", s)
return false
}
if letStmt.Name.Value != name {
t.Errorf("letStmt.Name.Value not '%s'. got=%s", name, letStmt.Name.Value)
return false
}
if letStmt.Name.TokenLiteral() != name {
t.Errorf("letStmt.Name.TokenLiteral() not '%s'. got=%s",
name, letStmt.Name.TokenLiteral())
return false
}
return true
}
func testInfixExpression(t *testing.T, exp ast.Expression, left interface{},
operator string, right interface{}) bool {
opExp, ok := exp.(*ast.InfixExpression)
if !ok {
t.Errorf("exp is not ast.InfixExpression. got=%T(%s)", exp, exp)
return false
}
if !testLiteralExpression(t, opExp.Left, left) {
return false
}
if opExp.Operator != operator {
t.Errorf("exp.Operator is not '%s'. got=%q", operator, opExp.Operator)
return false
}
if !testLiteralExpression(t, opExp.Right, right) {
return false
}
return true
}
func testLiteralExpression(
t *testing.T,
exp ast.Expression,
expected interface{},
) bool {
switch v := expected.(type) {
case int:
return testIntegerLiteral(t, exp, int64(v))
case int64:
return testIntegerLiteral(t, exp, v)
case string:
return testIdentifier(t, exp, v)
case bool:
return testBooleanLiteral(t, exp, v)
}
t.Errorf("type of exp not handled. got=%T", exp)
return false
}
func testIntegerLiteral(t *testing.T, il ast.Expression, value int64) bool {
integ, ok := il.(*ast.IntegerLiteral)
if !ok {
t.Errorf("il not *ast.IntegerLiteral. got=%T", il)
return false
}
if integ.Value != value {
t.Errorf("integ.Value not %d. got=%d", value, integ.Value)
return false
}
if integ.TokenLiteral() != fmt.Sprintf("%d", value) {
t.Errorf("integ.TokenLiteral not %d. got=%s", value,
integ.TokenLiteral())
return false
}
return true
}
func testIdentifier(t *testing.T, exp ast.Expression, value string) bool {
ident, ok := exp.(*ast.Identifier)
if !ok {
t.Errorf("exp not *ast.Identifier. got=%T", exp)
return false
}
if ident.Value != value {
t.Errorf("ident.Value not %s. got=%s", value, ident.Value)
return false
}
if ident.TokenLiteral() != value {
t.Errorf("ident.TokenLiteral not %s. got=%s", value,
ident.TokenLiteral())
return false
}
return true
}
func testBooleanLiteral(t *testing.T, exp ast.Expression, value bool) bool {
bo, ok := exp.(*ast.Boolean)
if !ok {
t.Errorf("exp not *ast.Boolean. got=%T", exp)
return false
}
if bo.Value != value {
t.Errorf("bo.Value not %t. got=%t", value, bo.Value)
return false
}
if bo.TokenLiteral() != fmt.Sprintf("%t", value) {
t.Errorf("bo.TokenLiteral not %t. got=%s",
value, bo.TokenLiteral())
return false
}
return true
}
func checkParserErrors(t *testing.T, p *Parser) {
errors := p.Errors()
if len(errors) == 0 {
return
}
t.Errorf("parser has %d errors", len(errors))
for _, msg := range errors {
t.Errorf("parser error: %q", msg)
}
t.FailNow()
}


@@ -0,0 +1,32 @@
package parser
import (
"fmt"
"strings"
)
var traceLevel int = 0
const traceIdentPlaceholder string = "\t"
func identLevel() string {
return strings.Repeat(traceIdentPlaceholder, traceLevel-1)
}
func tracePrint(fs string) {
fmt.Printf("%s%s\n", identLevel(), fs)
}
func incIdent() { traceLevel = traceLevel + 1 }
func decIdent() { traceLevel = traceLevel - 1 }
func trace(msg string) string {
incIdent()
tracePrint("BEGIN " + msg)
return msg
}
func untrace(msg string) {
tracePrint("END " + msg)
decIdent()
}
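
Nothing in the committed parser calls these tracing helpers; they are meant to be dropped into parse methods temporarily while debugging. A hedged sketch of the usual pattern (hypothetical placement, not part of the committed parser.go):

func (p *Parser) parseExpressionStatement() *ast.ExpressionStatement {
    // trace prints "BEGIN parseExpressionStatement" at the current depth,
    // and the deferred untrace prints the matching "END" line on return.
    defer untrace(trace("parseExpressionStatement"))
    stmt := &ast.ExpressionStatement{Token: p.curToken}
    stmt.Expression = p.parseExpression(LOWEST)
    if p.peekTokenIs(token.SEMICOLON) {
        p.nextToken()
    }
    return stmt
}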


@@ -0,0 +1,58 @@
package repl
import (
"bufio"
"fmt"
"io"
"monkey/lexer"
"monkey/parser"
)
const PROMPT = ">> "
func Start(in io.Reader, out io.Writer) {
scanner := bufio.NewScanner(in)
for {
fmt.Fprintf(out, PROMPT)
scanned := scanner.Scan()
if !scanned {
return
}
line := scanner.Text()
l := lexer.New(line)
p := parser.New(l)
program := p.ParseProgram()
if len(p.Errors()) != 0 {
printParserErrors(out, p.Errors())
continue
}
io.WriteString(out, program.String())
io.WriteString(out, "\n")
}
}
const MONKEY_FACE = `            __,__
   .--.  .-"     "-.  .--.
  / .. \/  .-. .-.  \/ .. \
 | |  '|  /   Y   \  |'  | |
 | \   \  \ 0 | 0 /  /   / |
  \ '- ,\.-"""""""-./, -' /
   ''-' /_   ^ ^   _\ '-''
       |  \._   _./  |
       \   \ '~' /   /
        '._ '-=-' _.'
           '-----'
`
func printParserErrors(out io.Writer, errors []string) {
io.WriteString(out, MONKEY_FACE)
io.WriteString(out, "Woops! We ran into some monkey business here!\n")
io.WriteString(out, " parser errors:\n")
for _, msg := range errors {
io.WriteString(out, "\t"+msg+"\n")
}
}


@@ -0,0 +1,66 @@
package token
type TokenType string
const (
ILLEGAL = "ILLEGAL"
EOF = "EOF"
// Identifiers + literals
IDENT = "IDENT" // add, foobar, x, y, ...
INT = "INT" // 1343456
// Operators
ASSIGN = "="
PLUS = "+"
MINUS = "-"
BANG = "!"
ASTERISK = "*"
SLASH = "/"
LT = "<"
GT = ">"
EQ = "=="
NOT_EQ = "!="
// Delimiters
COMMA = ","
SEMICOLON = ";"
LPAREN = "("
RPAREN = ")"
LBRACE = "{"
RBRACE = "}"
// Keywords
FUNCTION = "FUNCTION"
LET = "LET"
TRUE = "TRUE"
FALSE = "FALSE"
IF = "IF"
ELSE = "ELSE"
RETURN = "RETURN"
)
type Token struct {
Type TokenType
Literal string
}
var keywords = map[string]TokenType{
"fn": FUNCTION,
"let": LET,
"true": TRUE,
"false": FALSE,
"if": IF,
"else": ELSE,
"return": RETURN,
}
func LookupIdent(ident string) TokenType {
if tok, ok := keywords[ident]; ok {
return tok
}
return IDENT
}

waiig_code_1.7/03/.envrc Normal file

@@ -0,0 +1 @@
export GOPATH=$(pwd)


@@ -0,0 +1,266 @@
package ast
import (
"bytes"
"monkey/token"
"strings"
)
// The base Node interface
type Node interface {
TokenLiteral() string
String() string
}
// All statement nodes implement this
type Statement interface {
Node
statementNode()
}
// All expression nodes implement this
type Expression interface {
Node
expressionNode()
}
type Program struct {
Statements []Statement
}
func (p *Program) TokenLiteral() string {
if len(p.Statements) > 0 {
return p.Statements[0].TokenLiteral()
} else {
return ""
}
}
func (p *Program) String() string {
var out bytes.Buffer
for _, s := range p.Statements {
out.WriteString(s.String())
}
return out.String()
}
// Statements
type LetStatement struct {
Token token.Token // the token.LET token
Name *Identifier
Value Expression
}
func (ls *LetStatement) statementNode() {}
func (ls *LetStatement) TokenLiteral() string { return ls.Token.Literal }
func (ls *LetStatement) String() string {
var out bytes.Buffer
out.WriteString(ls.TokenLiteral() + " ")
out.WriteString(ls.Name.String())
out.WriteString(" = ")
if ls.Value != nil {
out.WriteString(ls.Value.String())
}
out.WriteString(";")
return out.String()
}
type ReturnStatement struct {
Token token.Token // the 'return' token
ReturnValue Expression
}
func (rs *ReturnStatement) statementNode() {}
func (rs *ReturnStatement) TokenLiteral() string { return rs.Token.Literal }
func (rs *ReturnStatement) String() string {
var out bytes.Buffer
out.WriteString(rs.TokenLiteral() + " ")
if rs.ReturnValue != nil {
out.WriteString(rs.ReturnValue.String())
}
out.WriteString(";")
return out.String()
}
type ExpressionStatement struct {
Token token.Token // the first token of the expression
Expression Expression
}
func (es *ExpressionStatement) statementNode() {}
func (es *ExpressionStatement) TokenLiteral() string { return es.Token.Literal }
func (es *ExpressionStatement) String() string {
if es.Expression != nil {
return es.Expression.String()
}
return ""
}
type BlockStatement struct {
Token token.Token // the { token
Statements []Statement
}
func (bs *BlockStatement) statementNode() {}
func (bs *BlockStatement) TokenLiteral() string { return bs.Token.Literal }
func (bs *BlockStatement) String() string {
var out bytes.Buffer
for _, s := range bs.Statements {
out.WriteString(s.String())
}
return out.String()
}
// Expressions
type Identifier struct {
Token token.Token // the token.IDENT token
Value string
}
func (i *Identifier) expressionNode() {}
func (i *Identifier) TokenLiteral() string { return i.Token.Literal }
func (i *Identifier) String() string { return i.Value }
type Boolean struct {
Token token.Token
Value bool
}
func (b *Boolean) expressionNode() {}
func (b *Boolean) TokenLiteral() string { return b.Token.Literal }
func (b *Boolean) String() string { return b.Token.Literal }
type IntegerLiteral struct {
Token token.Token
Value int64
}
func (il *IntegerLiteral) expressionNode() {}
func (il *IntegerLiteral) TokenLiteral() string { return il.Token.Literal }
func (il *IntegerLiteral) String() string { return il.Token.Literal }
type PrefixExpression struct {
Token token.Token // The prefix token, e.g. !
Operator string
Right Expression
}
func (pe *PrefixExpression) expressionNode() {}
func (pe *PrefixExpression) TokenLiteral() string { return pe.Token.Literal }
func (pe *PrefixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(pe.Operator)
out.WriteString(pe.Right.String())
out.WriteString(")")
return out.String()
}
type InfixExpression struct {
Token token.Token // The operator token, e.g. +
Left Expression
Operator string
Right Expression
}
func (ie *InfixExpression) expressionNode() {}
func (ie *InfixExpression) TokenLiteral() string { return ie.Token.Literal }
func (ie *InfixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(ie.Left.String())
out.WriteString(" " + ie.Operator + " ")
out.WriteString(ie.Right.String())
out.WriteString(")")
return out.String()
}
type IfExpression struct {
Token token.Token // The 'if' token
Condition Expression
Consequence *BlockStatement
Alternative *BlockStatement
}
func (ie *IfExpression) expressionNode() {}
func (ie *IfExpression) TokenLiteral() string { return ie.Token.Literal }
func (ie *IfExpression) String() string {
var out bytes.Buffer
out.WriteString("if")
out.WriteString(ie.Condition.String())
out.WriteString(" ")
out.WriteString(ie.Consequence.String())
if ie.Alternative != nil {
out.WriteString("else ")
out.WriteString(ie.Alternative.String())
}
return out.String()
}
type FunctionLiteral struct {
Token token.Token // The 'fn' token
Parameters []*Identifier
Body *BlockStatement
}
func (fl *FunctionLiteral) expressionNode() {}
func (fl *FunctionLiteral) TokenLiteral() string { return fl.Token.Literal }
func (fl *FunctionLiteral) String() string {
var out bytes.Buffer
params := []string{}
for _, p := range fl.Parameters {
params = append(params, p.String())
}
out.WriteString(fl.TokenLiteral())
out.WriteString("(")
out.WriteString(strings.Join(params, ", "))
out.WriteString(") ")
out.WriteString(fl.Body.String())
return out.String()
}
type CallExpression struct {
Token token.Token // The '(' token
Function Expression // Identifier or FunctionLiteral
Arguments []Expression
}
func (ce *CallExpression) expressionNode() {}
func (ce *CallExpression) TokenLiteral() string { return ce.Token.Literal }
func (ce *CallExpression) String() string {
var out bytes.Buffer
args := []string{}
for _, a := range ce.Arguments {
args = append(args, a.String())
}
out.WriteString(ce.Function.String())
out.WriteString("(")
out.WriteString(strings.Join(args, ", "))
out.WriteString(")")
return out.String()
}


@@ -0,0 +1,28 @@
package ast
import (
"monkey/token"
"testing"
)
func TestString(t *testing.T) {
program := &Program{
Statements: []Statement{
&LetStatement{
Token: token.Token{Type: token.LET, Literal: "let"},
Name: &Identifier{
Token: token.Token{Type: token.IDENT, Literal: "myVar"},
Value: "myVar",
},
Value: &Identifier{
Token: token.Token{Type: token.IDENT, Literal: "anotherVar"},
Value: "anotherVar",
},
},
},
}
if program.String() != "let myVar = anotherVar;" {
t.Errorf("program.String() wrong. got=%q", program.String())
}
}


@@ -0,0 +1,325 @@
package evaluator
import (
"fmt"
"monkey/ast"
"monkey/object"
)
var (
NULL = &object.Null{}
TRUE = &object.Boolean{Value: true}
FALSE = &object.Boolean{Value: false}
)
func Eval(node ast.Node, env *object.Environment) object.Object {
switch node := node.(type) {
// Statements
case *ast.Program:
return evalProgram(node, env)
case *ast.BlockStatement:
return evalBlockStatement(node, env)
case *ast.ExpressionStatement:
return Eval(node.Expression, env)
case *ast.ReturnStatement:
val := Eval(node.ReturnValue, env)
if isError(val) {
return val
}
return &object.ReturnValue{Value: val}
case *ast.LetStatement:
val := Eval(node.Value, env)
if isError(val) {
return val
}
env.Set(node.Name.Value, val)
// Expressions
case *ast.IntegerLiteral:
return &object.Integer{Value: node.Value}
case *ast.Boolean:
return nativeBoolToBooleanObject(node.Value)
case *ast.PrefixExpression:
right := Eval(node.Right, env)
if isError(right) {
return right
}
return evalPrefixExpression(node.Operator, right)
case *ast.InfixExpression:
left := Eval(node.Left, env)
if isError(left) {
return left
}
right := Eval(node.Right, env)
if isError(right) {
return right
}
return evalInfixExpression(node.Operator, left, right)
case *ast.IfExpression:
return evalIfExpression(node, env)
case *ast.Identifier:
return evalIdentifier(node, env)
case *ast.FunctionLiteral:
params := node.Parameters
body := node.Body
return &object.Function{Parameters: params, Env: env, Body: body}
case *ast.CallExpression:
function := Eval(node.Function, env)
if isError(function) {
return function
}
args := evalExpressions(node.Arguments, env)
if len(args) == 1 && isError(args[0]) {
return args[0]
}
return applyFunction(function, args)
}
return nil
}
func evalProgram(program *ast.Program, env *object.Environment) object.Object {
var result object.Object
for _, statement := range program.Statements {
result = Eval(statement, env)
switch result := result.(type) {
case *object.ReturnValue:
return result.Value
case *object.Error:
return result
}
}
return result
}
func evalBlockStatement(
block *ast.BlockStatement,
env *object.Environment,
) object.Object {
var result object.Object
for _, statement := range block.Statements {
result = Eval(statement, env)
if result != nil {
rt := result.Type()
if rt == object.RETURN_VALUE_OBJ || rt == object.ERROR_OBJ {
return result
}
}
}
return result
}
func nativeBoolToBooleanObject(input bool) *object.Boolean {
if input {
return TRUE
}
return FALSE
}
func evalPrefixExpression(operator string, right object.Object) object.Object {
switch operator {
case "!":
return evalBangOperatorExpression(right)
case "-":
return evalMinusPrefixOperatorExpression(right)
default:
return newError("unknown operator: %s%s", operator, right.Type())
}
}
func evalInfixExpression(
operator string,
left, right object.Object,
) object.Object {
switch {
case left.Type() == object.INTEGER_OBJ && right.Type() == object.INTEGER_OBJ:
return evalIntegerInfixExpression(operator, left, right)
case operator == "==":
return nativeBoolToBooleanObject(left == right)
case operator == "!=":
return nativeBoolToBooleanObject(left != right)
case left.Type() != right.Type():
return newError("type mismatch: %s %s %s",
left.Type(), operator, right.Type())
default:
return newError("unknown operator: %s %s %s",
left.Type(), operator, right.Type())
}
}
func evalBangOperatorExpression(right object.Object) object.Object {
switch right {
case TRUE:
return FALSE
case FALSE:
return TRUE
case NULL:
return TRUE
default:
return FALSE
}
}
func evalMinusPrefixOperatorExpression(right object.Object) object.Object {
if right.Type() != object.INTEGER_OBJ {
return newError("unknown operator: -%s", right.Type())
}
value := right.(*object.Integer).Value
return &object.Integer{Value: -value}
}
func evalIntegerInfixExpression(
operator string,
left, right object.Object,
) object.Object {
leftVal := left.(*object.Integer).Value
rightVal := right.(*object.Integer).Value
switch operator {
case "+":
return &object.Integer{Value: leftVal + rightVal}
case "-":
return &object.Integer{Value: leftVal - rightVal}
case "*":
return &object.Integer{Value: leftVal * rightVal}
case "/":
return &object.Integer{Value: leftVal / rightVal}
case "<":
return nativeBoolToBooleanObject(leftVal < rightVal)
case ">":
return nativeBoolToBooleanObject(leftVal > rightVal)
case "==":
return nativeBoolToBooleanObject(leftVal == rightVal)
case "!=":
return nativeBoolToBooleanObject(leftVal != rightVal)
default:
return newError("unknown operator: %s %s %s",
left.Type(), operator, right.Type())
}
}
func evalIfExpression(
ie *ast.IfExpression,
env *object.Environment,
) object.Object {
condition := Eval(ie.Condition, env)
if isError(condition) {
return condition
}
if isTruthy(condition) {
return Eval(ie.Consequence, env)
} else if ie.Alternative != nil {
return Eval(ie.Alternative, env)
} else {
return NULL
}
}
func evalIdentifier(
node *ast.Identifier,
env *object.Environment,
) object.Object {
val, ok := env.Get(node.Value)
if !ok {
return newError("identifier not found: " + node.Value)
}
return val
}
func isTruthy(obj object.Object) bool {
switch obj {
case NULL:
return false
case TRUE:
return true
case FALSE:
return false
default:
return true
}
}
func newError(format string, a ...interface{}) *object.Error {
return &object.Error{Message: fmt.Sprintf(format, a...)}
}
func isError(obj object.Object) bool {
if obj != nil {
return obj.Type() == object.ERROR_OBJ
}
return false
}
func evalExpressions(
exps []ast.Expression,
env *object.Environment,
) []object.Object {
var result []object.Object
for _, e := range exps {
evaluated := Eval(e, env)
if isError(evaluated) {
return []object.Object{evaluated}
}
result = append(result, evaluated)
}
return result
}
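// applyFunction evaluates the function body in a new environment that encloses
// the function's closure environment, then unwraps any ReturnValue so a return
// only terminates the function, not the caller's evaluation.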
func applyFunction(fn object.Object, args []object.Object) object.Object {
function, ok := fn.(*object.Function)
if !ok {
return newError("not a function: %s", fn.Type())
}
extendedEnv := extendFunctionEnv(function, args)
evaluated := Eval(function.Body, extendedEnv)
return unwrapReturnValue(evaluated)
}
func extendFunctionEnv(
fn *object.Function,
args []object.Object,
) *object.Environment {
env := object.NewEnclosedEnvironment(fn.Env)
for paramIdx, param := range fn.Parameters {
env.Set(param.Value, args[paramIdx])
}
return env
}
func unwrapReturnValue(obj object.Object) object.Object {
if returnValue, ok := obj.(*object.ReturnValue); ok {
return returnValue.Value
}
return obj
}

View file

@ -0,0 +1,351 @@
package evaluator
import (
"monkey/lexer"
"monkey/object"
"monkey/parser"
"testing"
)
func TestEvalIntegerExpression(t *testing.T) {
tests := []struct {
input string
expected int64
}{
{"5", 5},
{"10", 10},
{"-5", -5},
{"-10", -10},
{"5 + 5 + 5 + 5 - 10", 10},
{"2 * 2 * 2 * 2 * 2", 32},
{"-50 + 100 + -50", 0},
{"5 * 2 + 10", 20},
{"5 + 2 * 10", 25},
{"20 + 2 * -10", 0},
{"50 / 2 * 2 + 10", 60},
{"2 * (5 + 10)", 30},
{"3 * 3 * 3 + 10", 37},
{"3 * (3 * 3) + 10", 37},
{"(5 + 10 * 2 + 15 / 3) * 2 + -10", 50},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
testIntegerObject(t, evaluated, tt.expected)
}
}
func TestEvalBooleanExpression(t *testing.T) {
tests := []struct {
input string
expected bool
}{
{"true", true},
{"false", false},
{"1 < 2", true},
{"1 > 2", false},
{"1 < 1", false},
{"1 > 1", false},
{"1 == 1", true},
{"1 != 1", false},
{"1 == 2", false},
{"1 != 2", true},
{"true == true", true},
{"false == false", true},
{"true == false", false},
{"true != false", true},
{"false != true", true},
{"(1 < 2) == true", true},
{"(1 < 2) == false", false},
{"(1 > 2) == true", false},
{"(1 > 2) == false", true},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
testBooleanObject(t, evaluated, tt.expected)
}
}
func TestBangOperator(t *testing.T) {
tests := []struct {
input string
expected bool
}{
{"!true", false},
{"!false", true},
{"!5", false},
{"!!true", true},
{"!!false", false},
{"!!5", true},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
testBooleanObject(t, evaluated, tt.expected)
}
}
func TestIfElseExpressions(t *testing.T) {
tests := []struct {
input string
expected interface{}
}{
{"if (true) { 10 }", 10},
{"if (false) { 10 }", nil},
{"if (1) { 10 }", 10},
{"if (1 < 2) { 10 }", 10},
{"if (1 > 2) { 10 }", nil},
{"if (1 > 2) { 10 } else { 20 }", 20},
{"if (1 < 2) { 10 } else { 20 }", 10},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
integer, ok := tt.expected.(int)
if ok {
testIntegerObject(t, evaluated, int64(integer))
} else {
testNullObject(t, evaluated)
}
}
}
func TestReturnStatements(t *testing.T) {
tests := []struct {
input string
expected int64
}{
{"return 10;", 10},
{"return 10; 9;", 10},
{"return 2 * 5; 9;", 10},
{"9; return 2 * 5; 9;", 10},
{"if (10 > 1) { return 10; }", 10},
{
`
if (10 > 1) {
if (10 > 1) {
return 10;
}
return 1;
}
`,
10,
},
{
`
let f = fn(x) {
return x;
x + 10;
};
f(10);`,
10,
},
{
`
let f = fn(x) {
let result = x + 10;
return result;
return 10;
};
f(10);`,
20,
},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
testIntegerObject(t, evaluated, tt.expected)
}
}
func TestErrorHandling(t *testing.T) {
tests := []struct {
input string
expectedMessage string
}{
{
"5 + true;",
"type mismatch: INTEGER + BOOLEAN",
},
{
"5 + true; 5;",
"type mismatch: INTEGER + BOOLEAN",
},
{
"-true",
"unknown operator: -BOOLEAN",
},
{
"true + false;",
"unknown operator: BOOLEAN + BOOLEAN",
},
{
"true + false + true + false;",
"unknown operator: BOOLEAN + BOOLEAN",
},
{
"5; true + false; 5",
"unknown operator: BOOLEAN + BOOLEAN",
},
{
"if (10 > 1) { true + false; }",
"unknown operator: BOOLEAN + BOOLEAN",
},
{
`
if (10 > 1) {
if (10 > 1) {
return true + false;
}
return 1;
}
`,
"unknown operator: BOOLEAN + BOOLEAN",
},
{
"foobar",
"identifier not found: foobar",
},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
errObj, ok := evaluated.(*object.Error)
if !ok {
t.Errorf("no error object returned. got=%T(%+v)",
evaluated, evaluated)
continue
}
if errObj.Message != tt.expectedMessage {
t.Errorf("wrong error message. expected=%q, got=%q",
tt.expectedMessage, errObj.Message)
}
}
}
func TestLetStatements(t *testing.T) {
tests := []struct {
input string
expected int64
}{
{"let a = 5; a;", 5},
{"let a = 5 * 5; a;", 25},
{"let a = 5; let b = a; b;", 5},
{"let a = 5; let b = a; let c = a + b + 5; c;", 15},
}
for _, tt := range tests {
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestFunctionObject(t *testing.T) {
input := "fn(x) { x + 2; };"
evaluated := testEval(input)
fn, ok := evaluated.(*object.Function)
if !ok {
t.Fatalf("object is not Function. got=%T (%+v)", evaluated, evaluated)
}
if len(fn.Parameters) != 1 {
t.Fatalf("function has wrong parameters. Parameters=%+v",
fn.Parameters)
}
if fn.Parameters[0].String() != "x" {
t.Fatalf("parameter is not 'x'. got=%q", fn.Parameters[0])
}
expectedBody := "(x + 2)"
if fn.Body.String() != expectedBody {
t.Fatalf("body is not %q. got=%q", expectedBody, fn.Body.String())
}
}
func TestFunctionApplication(t *testing.T) {
tests := []struct {
input string
expected int64
}{
{"let identity = fn(x) { x; }; identity(5);", 5},
{"let identity = fn(x) { return x; }; identity(5);", 5},
{"let double = fn(x) { x * 2; }; double(5);", 10},
{"let add = fn(x, y) { x + y; }; add(5, 5);", 10},
{"let add = fn(x, y) { x + y; }; add(5 + 5, add(5, 5));", 20},
{"fn(x) { x; }(5)", 5},
}
for _, tt := range tests {
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestEnclosingEnvironments(t *testing.T) {
input := `
let first = 10;
let second = 10;
let third = 10;
let ourFunction = fn(first) {
let second = 20;
first + second + third;
};
ourFunction(20) + first + second;`
testIntegerObject(t, testEval(input), 70)
}
func testEval(input string) object.Object {
l := lexer.New(input)
p := parser.New(l)
program := p.ParseProgram()
env := object.NewEnvironment()
return Eval(program, env)
}
func testIntegerObject(t *testing.T, obj object.Object, expected int64) bool {
result, ok := obj.(*object.Integer)
if !ok {
t.Errorf("object is not Integer. got=%T (%+v)", obj, obj)
return false
}
if result.Value != expected {
t.Errorf("object has wrong value. got=%d, want=%d",
result.Value, expected)
return false
}
return true
}
func testBooleanObject(t *testing.T, obj object.Object, expected bool) bool {
result, ok := obj.(*object.Boolean)
if !ok {
t.Errorf("object is not Boolean. got=%T (%+v)", obj, obj)
return false
}
if result.Value != expected {
t.Errorf("object has wrong value. got=%t, want=%t",
result.Value, expected)
return false
}
return true
}
func testNullObject(t *testing.T, obj object.Object) bool {
if obj != NULL {
t.Errorf("object is not NULL. got=%T (%+v)", obj, obj)
return false
}
return true
}

View file

@ -0,0 +1,3 @@
module monkey
go 1.14

View file

@ -0,0 +1,137 @@
package lexer
import "monkey/token"
type Lexer struct {
input string
position int // current position in input (points to current char)
readPosition int // current reading position in input (after current char)
ch byte // current char under examination
}
func New(input string) *Lexer {
l := &Lexer{input: input}
l.readChar()
return l
}
func (l *Lexer) NextToken() token.Token {
var tok token.Token
l.skipWhitespace()
switch l.ch {
case '=':
if l.peekChar() == '=' {
ch := l.ch
l.readChar()
literal := string(ch) + string(l.ch)
tok = token.Token{Type: token.EQ, Literal: literal}
} else {
tok = newToken(token.ASSIGN, l.ch)
}
case '+':
tok = newToken(token.PLUS, l.ch)
case '-':
tok = newToken(token.MINUS, l.ch)
case '!':
if l.peekChar() == '=' {
ch := l.ch
l.readChar()
literal := string(ch) + string(l.ch)
tok = token.Token{Type: token.NOT_EQ, Literal: literal}
} else {
tok = newToken(token.BANG, l.ch)
}
case '/':
tok = newToken(token.SLASH, l.ch)
case '*':
tok = newToken(token.ASTERISK, l.ch)
case '<':
tok = newToken(token.LT, l.ch)
case '>':
tok = newToken(token.GT, l.ch)
case ';':
tok = newToken(token.SEMICOLON, l.ch)
case ',':
tok = newToken(token.COMMA, l.ch)
case '{':
tok = newToken(token.LBRACE, l.ch)
case '}':
tok = newToken(token.RBRACE, l.ch)
case '(':
tok = newToken(token.LPAREN, l.ch)
case ')':
tok = newToken(token.RPAREN, l.ch)
case 0:
tok.Literal = ""
tok.Type = token.EOF
default:
if isLetter(l.ch) {
tok.Literal = l.readIdentifier()
tok.Type = token.LookupIdent(tok.Literal)
return tok
} else if isDigit(l.ch) {
tok.Type = token.INT
tok.Literal = l.readNumber()
return tok
} else {
tok = newToken(token.ILLEGAL, l.ch)
}
}
l.readChar()
return tok
}
func (l *Lexer) skipWhitespace() {
for l.ch == ' ' || l.ch == '\t' || l.ch == '\n' || l.ch == '\r' {
l.readChar()
}
}
func (l *Lexer) readChar() {
if l.readPosition >= len(l.input) {
l.ch = 0
} else {
l.ch = l.input[l.readPosition]
}
l.position = l.readPosition
l.readPosition += 1
}
func (l *Lexer) peekChar() byte {
if l.readPosition >= len(l.input) {
return 0
} else {
return l.input[l.readPosition]
}
}
func (l *Lexer) readIdentifier() string {
position := l.position
for isLetter(l.ch) {
l.readChar()
}
return l.input[position:l.position]
}
func (l *Lexer) readNumber() string {
position := l.position
for isDigit(l.ch) {
l.readChar()
}
return l.input[position:l.position]
}
func isLetter(ch byte) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_'
}
func isDigit(ch byte) bool {
return '0' <= ch && ch <= '9'
}
func newToken(tokenType token.TokenType, ch byte) token.Token {
return token.Token{Type: tokenType, Literal: string(ch)}
}

View file

@ -0,0 +1,126 @@
package lexer
import (
"testing"
"monkey/token"
)
func TestNextToken(t *testing.T) {
input := `let five = 5;
let ten = 10;
let add = fn(x, y) {
x + y;
};
let result = add(five, ten);
!-/*5;
5 < 10 > 5;
if (5 < 10) {
return true;
} else {
return false;
}
10 == 10;
10 != 9;
`
tests := []struct {
expectedType token.TokenType
expectedLiteral string
}{
{token.LET, "let"},
{token.IDENT, "five"},
{token.ASSIGN, "="},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "ten"},
{token.ASSIGN, "="},
{token.INT, "10"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "add"},
{token.ASSIGN, "="},
{token.FUNCTION, "fn"},
{token.LPAREN, "("},
{token.IDENT, "x"},
{token.COMMA, ","},
{token.IDENT, "y"},
{token.RPAREN, ")"},
{token.LBRACE, "{"},
{token.IDENT, "x"},
{token.PLUS, "+"},
{token.IDENT, "y"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "result"},
{token.ASSIGN, "="},
{token.IDENT, "add"},
{token.LPAREN, "("},
{token.IDENT, "five"},
{token.COMMA, ","},
{token.IDENT, "ten"},
{token.RPAREN, ")"},
{token.SEMICOLON, ";"},
{token.BANG, "!"},
{token.MINUS, "-"},
{token.SLASH, "/"},
{token.ASTERISK, "*"},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.INT, "5"},
{token.LT, "<"},
{token.INT, "10"},
{token.GT, ">"},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.IF, "if"},
{token.LPAREN, "("},
{token.INT, "5"},
{token.LT, "<"},
{token.INT, "10"},
{token.RPAREN, ")"},
{token.LBRACE, "{"},
{token.RETURN, "return"},
{token.TRUE, "true"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.ELSE, "else"},
{token.LBRACE, "{"},
{token.RETURN, "return"},
{token.FALSE, "false"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.INT, "10"},
{token.EQ, "=="},
{token.INT, "10"},
{token.SEMICOLON, ";"},
{token.INT, "10"},
{token.NOT_EQ, "!="},
{token.INT, "9"},
{token.SEMICOLON, ";"},
{token.EOF, ""},
}
l := New(input)
for i, tt := range tests {
tok := l.NextToken()
if tok.Type != tt.expectedType {
t.Fatalf("tests[%d] - tokentype wrong. expected=%q, got=%q",
i, tt.expectedType, tok.Type)
}
if tok.Literal != tt.expectedLiteral {
t.Fatalf("tests[%d] - literal wrong. expected=%q, got=%q",
i, tt.expectedLiteral, tok.Literal)
}
}
}

View file

@ -0,0 +1,19 @@
package main
import (
"fmt"
"monkey/repl"
"os"
"os/user"
)
func main() {
user, err := user.Current()
if err != nil {
panic(err)
}
fmt.Printf("Hello %s! This is the Monkey programming language!\n",
user.Username)
fmt.Printf("Feel free to type in commands\n")
repl.Start(os.Stdin, os.Stdout)
}

View file

@ -0,0 +1,30 @@
package object
func NewEnclosedEnvironment(outer *Environment) *Environment {
env := NewEnvironment()
env.outer = outer
return env
}
func NewEnvironment() *Environment {
s := make(map[string]Object)
return &Environment{store: s, outer: nil}
}
type Environment struct {
store map[string]Object
outer *Environment
}
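// Get checks the local store first and falls back to the outer environment,
// which is what gives function bodies access to bindings from enclosing scopes.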
func (e *Environment) Get(name string) (Object, bool) {
obj, ok := e.store[name]
if !ok && e.outer != nil {
obj, ok = e.outer.Get(name)
}
return obj, ok
}
func (e *Environment) Set(name string, val Object) Object {
e.store[name] = val
return val
}

View file

@ -0,0 +1,85 @@
package object
import (
"bytes"
"fmt"
"monkey/ast"
"strings"
)
type ObjectType string
const (
NULL_OBJ = "NULL"
ERROR_OBJ = "ERROR"
INTEGER_OBJ = "INTEGER"
BOOLEAN_OBJ = "BOOLEAN"
RETURN_VALUE_OBJ = "RETURN_VALUE"
FUNCTION_OBJ = "FUNCTION"
)
type Object interface {
Type() ObjectType
Inspect() string
}
type Integer struct {
Value int64
}
func (i *Integer) Type() ObjectType { return INTEGER_OBJ }
func (i *Integer) Inspect() string { return fmt.Sprintf("%d", i.Value) }
type Boolean struct {
Value bool
}
func (b *Boolean) Type() ObjectType { return BOOLEAN_OBJ }
func (b *Boolean) Inspect() string { return fmt.Sprintf("%t", b.Value) }
type Null struct{}
func (n *Null) Type() ObjectType { return NULL_OBJ }
func (n *Null) Inspect() string { return "null" }
type ReturnValue struct {
Value Object
}
func (rv *ReturnValue) Type() ObjectType { return RETURN_VALUE_OBJ }
func (rv *ReturnValue) Inspect() string { return rv.Value.Inspect() }
type Error struct {
Message string
}
func (e *Error) Type() ObjectType { return ERROR_OBJ }
func (e *Error) Inspect() string { return "ERROR: " + e.Message }
type Function struct {
Parameters []*ast.Identifier
Body *ast.BlockStatement
Env *Environment
}
func (f *Function) Type() ObjectType { return FUNCTION_OBJ }
func (f *Function) Inspect() string {
var out bytes.Buffer
params := []string{}
for _, p := range f.Parameters {
params = append(params, p.String())
}
out.WriteString("fn")
out.WriteString("(")
out.WriteString(strings.Join(params, ", "))
out.WriteString(") {\n")
out.WriteString(f.Body.String())
out.WriteString("\n}")
return out.String()
}

View file

@ -0,0 +1,431 @@
package parser
import (
"fmt"
"monkey/ast"
"monkey/lexer"
"monkey/token"
"strconv"
)
const (
_ int = iota
LOWEST
EQUALS // ==
LESSGREATER // > or <
SUM // +
PRODUCT // *
PREFIX // -X or !X
CALL // myFunction(X)
)
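// precedences maps token types to their binding power; anything not listed
// defaults to LOWEST.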
var precedences = map[token.TokenType]int{
token.EQ: EQUALS,
token.NOT_EQ: EQUALS,
token.LT: LESSGREATER,
token.GT: LESSGREATER,
token.PLUS: SUM,
token.MINUS: SUM,
token.SLASH: PRODUCT,
token.ASTERISK: PRODUCT,
token.LPAREN: CALL,
}
type (
prefixParseFn func() ast.Expression
infixParseFn func(ast.Expression) ast.Expression
)
type Parser struct {
l *lexer.Lexer
errors []string
curToken token.Token
peekToken token.Token
prefixParseFns map[token.TokenType]prefixParseFn
infixParseFns map[token.TokenType]infixParseFn
}
func New(l *lexer.Lexer) *Parser {
p := &Parser{
l: l,
errors: []string{},
}
p.prefixParseFns = make(map[token.TokenType]prefixParseFn)
p.registerPrefix(token.IDENT, p.parseIdentifier)
p.registerPrefix(token.INT, p.parseIntegerLiteral)
p.registerPrefix(token.BANG, p.parsePrefixExpression)
p.registerPrefix(token.MINUS, p.parsePrefixExpression)
p.registerPrefix(token.TRUE, p.parseBoolean)
p.registerPrefix(token.FALSE, p.parseBoolean)
p.registerPrefix(token.LPAREN, p.parseGroupedExpression)
p.registerPrefix(token.IF, p.parseIfExpression)
p.registerPrefix(token.FUNCTION, p.parseFunctionLiteral)
p.infixParseFns = make(map[token.TokenType]infixParseFn)
p.registerInfix(token.PLUS, p.parseInfixExpression)
p.registerInfix(token.MINUS, p.parseInfixExpression)
p.registerInfix(token.SLASH, p.parseInfixExpression)
p.registerInfix(token.ASTERISK, p.parseInfixExpression)
p.registerInfix(token.EQ, p.parseInfixExpression)
p.registerInfix(token.NOT_EQ, p.parseInfixExpression)
p.registerInfix(token.LT, p.parseInfixExpression)
p.registerInfix(token.GT, p.parseInfixExpression)
p.registerInfix(token.LPAREN, p.parseCallExpression)
// Read two tokens, so curToken and peekToken are both set
p.nextToken()
p.nextToken()
return p
}
func (p *Parser) nextToken() {
p.curToken = p.peekToken
p.peekToken = p.l.NextToken()
}
func (p *Parser) curTokenIs(t token.TokenType) bool {
return p.curToken.Type == t
}
func (p *Parser) peekTokenIs(t token.TokenType) bool {
return p.peekToken.Type == t
}
func (p *Parser) expectPeek(t token.TokenType) bool {
if p.peekTokenIs(t) {
p.nextToken()
return true
} else {
p.peekError(t)
return false
}
}
func (p *Parser) Errors() []string {
return p.errors
}
func (p *Parser) peekError(t token.TokenType) {
msg := fmt.Sprintf("expected next token to be %s, got %s instead",
t, p.peekToken.Type)
p.errors = append(p.errors, msg)
}
func (p *Parser) noPrefixParseFnError(t token.TokenType) {
msg := fmt.Sprintf("no prefix parse function for %s found", t)
p.errors = append(p.errors, msg)
}
func (p *Parser) ParseProgram() *ast.Program {
program := &ast.Program{}
program.Statements = []ast.Statement{}
for !p.curTokenIs(token.EOF) {
stmt := p.parseStatement()
if stmt != nil {
program.Statements = append(program.Statements, stmt)
}
p.nextToken()
}
return program
}
func (p *Parser) parseStatement() ast.Statement {
switch p.curToken.Type {
case token.LET:
return p.parseLetStatement()
case token.RETURN:
return p.parseReturnStatement()
default:
return p.parseExpressionStatement()
}
}
func (p *Parser) parseLetStatement() *ast.LetStatement {
stmt := &ast.LetStatement{Token: p.curToken}
if !p.expectPeek(token.IDENT) {
return nil
}
stmt.Name = &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
if !p.expectPeek(token.ASSIGN) {
return nil
}
p.nextToken()
stmt.Value = p.parseExpression(LOWEST)
if p.peekTokenIs(token.SEMICOLON) {
p.nextToken()
}
return stmt
}
func (p *Parser) parseReturnStatement() *ast.ReturnStatement {
stmt := &ast.ReturnStatement{Token: p.curToken}
p.nextToken()
stmt.ReturnValue = p.parseExpression(LOWEST)
if p.peekTokenIs(token.SEMICOLON) {
p.nextToken()
}
return stmt
}
func (p *Parser) parseExpressionStatement() *ast.ExpressionStatement {
stmt := &ast.ExpressionStatement{Token: p.curToken}
stmt.Expression = p.parseExpression(LOWEST)
if p.peekTokenIs(token.SEMICOLON) {
p.nextToken()
}
return stmt
}
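// parseExpression is the core of the Pratt parser: it applies the prefix parse
// function registered for the current token, then keeps folding the result into
// infix expressions for as long as the next token binds more tightly than the
// given precedence.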
func (p *Parser) parseExpression(precedence int) ast.Expression {
prefix := p.prefixParseFns[p.curToken.Type]
if prefix == nil {
p.noPrefixParseFnError(p.curToken.Type)
return nil
}
leftExp := prefix()
for !p.peekTokenIs(token.SEMICOLON) && precedence < p.peekPrecedence() {
infix := p.infixParseFns[p.peekToken.Type]
if infix == nil {
return leftExp
}
p.nextToken()
leftExp = infix(leftExp)
}
return leftExp
}
func (p *Parser) peekPrecedence() int {
if p, ok := precedences[p.peekToken.Type]; ok {
return p
}
return LOWEST
}
func (p *Parser) curPrecedence() int {
if p, ok := precedences[p.curToken.Type]; ok {
return p
}
return LOWEST
}
func (p *Parser) parseIdentifier() ast.Expression {
return &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
}
func (p *Parser) parseIntegerLiteral() ast.Expression {
lit := &ast.IntegerLiteral{Token: p.curToken}
value, err := strconv.ParseInt(p.curToken.Literal, 0, 64)
if err != nil {
msg := fmt.Sprintf("could not parse %q as integer", p.curToken.Literal)
p.errors = append(p.errors, msg)
return nil
}
lit.Value = value
return lit
}
func (p *Parser) parsePrefixExpression() ast.Expression {
expression := &ast.PrefixExpression{
Token: p.curToken,
Operator: p.curToken.Literal,
}
p.nextToken()
expression.Right = p.parseExpression(PREFIX)
return expression
}
func (p *Parser) parseInfixExpression(left ast.Expression) ast.Expression {
expression := &ast.InfixExpression{
Token: p.curToken,
Operator: p.curToken.Literal,
Left: left,
}
precedence := p.curPrecedence()
p.nextToken()
expression.Right = p.parseExpression(precedence)
return expression
}
func (p *Parser) parseBoolean() ast.Expression {
return &ast.Boolean{Token: p.curToken, Value: p.curTokenIs(token.TRUE)}
}
func (p *Parser) parseGroupedExpression() ast.Expression {
p.nextToken()
exp := p.parseExpression(LOWEST)
if !p.expectPeek(token.RPAREN) {
return nil
}
return exp
}
func (p *Parser) parseIfExpression() ast.Expression {
expression := &ast.IfExpression{Token: p.curToken}
if !p.expectPeek(token.LPAREN) {
return nil
}
p.nextToken()
expression.Condition = p.parseExpression(LOWEST)
if !p.expectPeek(token.RPAREN) {
return nil
}
if !p.expectPeek(token.LBRACE) {
return nil
}
expression.Consequence = p.parseBlockStatement()
if p.peekTokenIs(token.ELSE) {
p.nextToken()
if !p.expectPeek(token.LBRACE) {
return nil
}
expression.Alternative = p.parseBlockStatement()
}
return expression
}
func (p *Parser) parseBlockStatement() *ast.BlockStatement {
block := &ast.BlockStatement{Token: p.curToken}
block.Statements = []ast.Statement{}
p.nextToken()
for !p.curTokenIs(token.RBRACE) && !p.curTokenIs(token.EOF) {
stmt := p.parseStatement()
if stmt != nil {
block.Statements = append(block.Statements, stmt)
}
p.nextToken()
}
return block
}
func (p *Parser) parseFunctionLiteral() ast.Expression {
lit := &ast.FunctionLiteral{Token: p.curToken}
if !p.expectPeek(token.LPAREN) {
return nil
}
lit.Parameters = p.parseFunctionParameters()
if !p.expectPeek(token.LBRACE) {
return nil
}
lit.Body = p.parseBlockStatement()
return lit
}
func (p *Parser) parseFunctionParameters() []*ast.Identifier {
identifiers := []*ast.Identifier{}
if p.peekTokenIs(token.RPAREN) {
p.nextToken()
return identifiers
}
p.nextToken()
ident := &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
identifiers = append(identifiers, ident)
for p.peekTokenIs(token.COMMA) {
p.nextToken()
p.nextToken()
ident := &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
identifiers = append(identifiers, ident)
}
if !p.expectPeek(token.RPAREN) {
return nil
}
return identifiers
}
func (p *Parser) parseCallExpression(function ast.Expression) ast.Expression {
exp := &ast.CallExpression{Token: p.curToken, Function: function}
exp.Arguments = p.parseCallArguments()
return exp
}
func (p *Parser) parseCallArguments() []ast.Expression {
args := []ast.Expression{}
if p.peekTokenIs(token.RPAREN) {
p.nextToken()
return args
}
p.nextToken()
args = append(args, p.parseExpression(LOWEST))
for p.peekTokenIs(token.COMMA) {
p.nextToken()
p.nextToken()
args = append(args, p.parseExpression(LOWEST))
}
if !p.expectPeek(token.RPAREN) {
return nil
}
return args
}
func (p *Parser) registerPrefix(tokenType token.TokenType, fn prefixParseFn) {
p.prefixParseFns[tokenType] = fn
}
func (p *Parser) registerInfix(tokenType token.TokenType, fn infixParseFn) {
p.infixParseFns[tokenType] = fn
}

View file

@ -0,0 +1,817 @@
package parser
import (
"fmt"
"monkey/ast"
"monkey/lexer"
"testing"
)
func TestLetStatements(t *testing.T) {
tests := []struct {
input string
expectedIdentifier string
expectedValue interface{}
}{
{"let x = 5;", "x", 5},
{"let y = true;", "y", true},
{"let foobar = y;", "foobar", "y"},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain 1 statements. got=%d",
len(program.Statements))
}
stmt := program.Statements[0]
if !testLetStatement(t, stmt, tt.expectedIdentifier) {
return
}
val := stmt.(*ast.LetStatement).Value
if !testLiteralExpression(t, val, tt.expectedValue) {
return
}
}
}
func TestReturnStatements(t *testing.T) {
tests := []struct {
input string
expectedValue interface{}
}{
{"return 5;", 5},
{"return true;", true},
{"return foobar;", "foobar"},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain 1 statements. got=%d",
len(program.Statements))
}
stmt := program.Statements[0]
returnStmt, ok := stmt.(*ast.ReturnStatement)
if !ok {
t.Fatalf("stmt not *ast.ReturnStatement. got=%T", stmt)
}
if returnStmt.TokenLiteral() != "return" {
t.Fatalf("returnStmt.TokenLiteral not 'return', got %q",
returnStmt.TokenLiteral())
}
if !testLiteralExpression(t, returnStmt.ReturnValue, tt.expectedValue) {
return
}
}
}
func TestIdentifierExpression(t *testing.T) {
input := "foobar;"
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program has not enough statements. got=%d",
len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
ident, ok := stmt.Expression.(*ast.Identifier)
if !ok {
t.Fatalf("exp not *ast.Identifier. got=%T", stmt.Expression)
}
if ident.Value != "foobar" {
t.Errorf("ident.Value not %s. got=%s", "foobar", ident.Value)
}
if ident.TokenLiteral() != "foobar" {
t.Errorf("ident.TokenLiteral not %s. got=%s", "foobar",
ident.TokenLiteral())
}
}
func TestIntegerLiteralExpression(t *testing.T) {
input := "5;"
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program has not enough statements. got=%d",
len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
literal, ok := stmt.Expression.(*ast.IntegerLiteral)
if !ok {
t.Fatalf("exp not *ast.IntegerLiteral. got=%T", stmt.Expression)
}
if literal.Value != 5 {
t.Errorf("literal.Value not %d. got=%d", 5, literal.Value)
}
if literal.TokenLiteral() != "5" {
t.Errorf("literal.TokenLiteral not %s. got=%s", "5",
literal.TokenLiteral())
}
}
func TestParsingPrefixExpressions(t *testing.T) {
prefixTests := []struct {
input string
operator string
value interface{}
}{
{"!5;", "!", 5},
{"-15;", "-", 15},
{"!foobar;", "!", "foobar"},
{"-foobar;", "-", "foobar"},
{"!true;", "!", true},
{"!false;", "!", false},
}
for _, tt := range prefixTests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
exp, ok := stmt.Expression.(*ast.PrefixExpression)
if !ok {
t.Fatalf("stmt is not ast.PrefixExpression. got=%T", stmt.Expression)
}
if exp.Operator != tt.operator {
t.Fatalf("exp.Operator is not '%s'. got=%s",
tt.operator, exp.Operator)
}
if !testLiteralExpression(t, exp.Right, tt.value) {
return
}
}
}
func TestParsingInfixExpressions(t *testing.T) {
infixTests := []struct {
input string
leftValue interface{}
operator string
rightValue interface{}
}{
{"5 + 5;", 5, "+", 5},
{"5 - 5;", 5, "-", 5},
{"5 * 5;", 5, "*", 5},
{"5 / 5;", 5, "/", 5},
{"5 > 5;", 5, ">", 5},
{"5 < 5;", 5, "<", 5},
{"5 == 5;", 5, "==", 5},
{"5 != 5;", 5, "!=", 5},
{"foobar + barfoo;", "foobar", "+", "barfoo"},
{"foobar - barfoo;", "foobar", "-", "barfoo"},
{"foobar * barfoo;", "foobar", "*", "barfoo"},
{"foobar / barfoo;", "foobar", "/", "barfoo"},
{"foobar > barfoo;", "foobar", ">", "barfoo"},
{"foobar < barfoo;", "foobar", "<", "barfoo"},
{"foobar == barfoo;", "foobar", "==", "barfoo"},
{"foobar != barfoo;", "foobar", "!=", "barfoo"},
{"true == true", true, "==", true},
{"true != false", true, "!=", false},
{"false == false", false, "==", false},
}
for _, tt := range infixTests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
if !testInfixExpression(t, stmt.Expression, tt.leftValue,
tt.operator, tt.rightValue) {
return
}
}
}
func TestOperatorPrecedenceParsing(t *testing.T) {
tests := []struct {
input string
expected string
}{
{
"-a * b",
"((-a) * b)",
},
{
"!-a",
"(!(-a))",
},
{
"a + b + c",
"((a + b) + c)",
},
{
"a + b - c",
"((a + b) - c)",
},
{
"a * b * c",
"((a * b) * c)",
},
{
"a * b / c",
"((a * b) / c)",
},
{
"a + b / c",
"(a + (b / c))",
},
{
"a + b * c + d / e - f",
"(((a + (b * c)) + (d / e)) - f)",
},
{
"3 + 4; -5 * 5",
"(3 + 4)((-5) * 5)",
},
{
"5 > 4 == 3 < 4",
"((5 > 4) == (3 < 4))",
},
{
"5 < 4 != 3 > 4",
"((5 < 4) != (3 > 4))",
},
{
"3 + 4 * 5 == 3 * 1 + 4 * 5",
"((3 + (4 * 5)) == ((3 * 1) + (4 * 5)))",
},
{
"true",
"true",
},
{
"false",
"false",
},
{
"3 > 5 == false",
"((3 > 5) == false)",
},
{
"3 < 5 == true",
"((3 < 5) == true)",
},
{
"1 + (2 + 3) + 4",
"((1 + (2 + 3)) + 4)",
},
{
"(5 + 5) * 2",
"((5 + 5) * 2)",
},
{
"2 / (5 + 5)",
"(2 / (5 + 5))",
},
{
"(5 + 5) * 2 * (5 + 5)",
"(((5 + 5) * 2) * (5 + 5))",
},
{
"-(5 + 5)",
"(-(5 + 5))",
},
{
"!(true == true)",
"(!(true == true))",
},
{
"a + add(b * c) + d",
"((a + add((b * c))) + d)",
},
{
"add(a, b, 1, 2 * 3, 4 + 5, add(6, 7 * 8))",
"add(a, b, 1, (2 * 3), (4 + 5), add(6, (7 * 8)))",
},
{
"add(a + b + c * d / f + g)",
"add((((a + b) + ((c * d) / f)) + g))",
},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
actual := program.String()
if actual != tt.expected {
t.Errorf("expected=%q, got=%q", tt.expected, actual)
}
}
}
func TestBooleanExpression(t *testing.T) {
tests := []struct {
input string
expectedBoolean bool
}{
{"true;", true},
{"false;", false},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program has not enough statements. got=%d",
len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
boolean, ok := stmt.Expression.(*ast.Boolean)
if !ok {
t.Fatalf("exp not *ast.Boolean. got=%T", stmt.Expression)
}
if boolean.Value != tt.expectedBoolean {
t.Errorf("boolean.Value not %t. got=%t", tt.expectedBoolean,
boolean.Value)
}
}
}
func TestIfExpression(t *testing.T) {
input := `if (x < y) { x }`
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
exp, ok := stmt.Expression.(*ast.IfExpression)
if !ok {
t.Fatalf("stmt.Expression is not ast.IfExpression. got=%T",
stmt.Expression)
}
if !testInfixExpression(t, exp.Condition, "x", "<", "y") {
return
}
if len(exp.Consequence.Statements) != 1 {
t.Errorf("consequence is not 1 statements. got=%d\n",
len(exp.Consequence.Statements))
}
consequence, ok := exp.Consequence.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("Statements[0] is not ast.ExpressionStatement. got=%T",
exp.Consequence.Statements[0])
}
if !testIdentifier(t, consequence.Expression, "x") {
return
}
if exp.Alternative != nil {
t.Errorf("exp.Alternative.Statements was not nil. got=%+v", exp.Alternative)
}
}
func TestIfElseExpression(t *testing.T) {
input := `if (x < y) { x } else { y }`
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
exp, ok := stmt.Expression.(*ast.IfExpression)
if !ok {
t.Fatalf("stmt.Expression is not ast.IfExpression. got=%T", stmt.Expression)
}
if !testInfixExpression(t, exp.Condition, "x", "<", "y") {
return
}
if len(exp.Consequence.Statements) != 1 {
t.Errorf("consequence is not 1 statements. got=%d\n",
len(exp.Consequence.Statements))
}
consequence, ok := exp.Consequence.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("Statements[0] is not ast.ExpressionStatement. got=%T",
exp.Consequence.Statements[0])
}
if !testIdentifier(t, consequence.Expression, "x") {
return
}
if len(exp.Alternative.Statements) != 1 {
t.Errorf("exp.Alternative.Statements does not contain 1 statements. got=%d\n",
len(exp.Alternative.Statements))
}
alternative, ok := exp.Alternative.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("Statements[0] is not ast.ExpressionStatement. got=%T",
exp.Alternative.Statements[0])
}
if !testIdentifier(t, alternative.Expression, "y") {
return
}
}
func TestFunctionLiteralParsing(t *testing.T) {
input := `fn(x, y) { x + y; }`
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("program.Statements[0] is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
function, ok := stmt.Expression.(*ast.FunctionLiteral)
if !ok {
t.Fatalf("stmt.Expression is not ast.FunctionLiteral. got=%T",
stmt.Expression)
}
if len(function.Parameters) != 2 {
t.Fatalf("function literal parameters wrong. want 2, got=%d\n",
len(function.Parameters))
}
testLiteralExpression(t, function.Parameters[0], "x")
testLiteralExpression(t, function.Parameters[1], "y")
if len(function.Body.Statements) != 1 {
t.Fatalf("function.Body.Statements has not 1 statements. got=%d\n",
len(function.Body.Statements))
}
bodyStmt, ok := function.Body.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("function body stmt is not ast.ExpressionStatement. got=%T",
function.Body.Statements[0])
}
testInfixExpression(t, bodyStmt.Expression, "x", "+", "y")
}
func TestFunctionParameterParsing(t *testing.T) {
tests := []struct {
input string
expectedParams []string
}{
{input: "fn() {};", expectedParams: []string{}},
{input: "fn(x) {};", expectedParams: []string{"x"}},
{input: "fn(x, y, z) {};", expectedParams: []string{"x", "y", "z"}},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
stmt := program.Statements[0].(*ast.ExpressionStatement)
function := stmt.Expression.(*ast.FunctionLiteral)
if len(function.Parameters) != len(tt.expectedParams) {
t.Errorf("length parameters wrong. want %d, got=%d\n",
len(tt.expectedParams), len(function.Parameters))
}
for i, ident := range tt.expectedParams {
testLiteralExpression(t, function.Parameters[i], ident)
}
}
}
func TestCallExpressionParsing(t *testing.T) {
input := "add(1, 2 * 3, 4 + 5);"
l := lexer.New(input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
if len(program.Statements) != 1 {
t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
1, len(program.Statements))
}
stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
t.Fatalf("stmt is not ast.ExpressionStatement. got=%T",
program.Statements[0])
}
exp, ok := stmt.Expression.(*ast.CallExpression)
if !ok {
t.Fatalf("stmt.Expression is not ast.CallExpression. got=%T",
stmt.Expression)
}
if !testIdentifier(t, exp.Function, "add") {
return
}
if len(exp.Arguments) != 3 {
t.Fatalf("wrong length of arguments. got=%d", len(exp.Arguments))
}
testLiteralExpression(t, exp.Arguments[0], 1)
testInfixExpression(t, exp.Arguments[1], 2, "*", 3)
testInfixExpression(t, exp.Arguments[2], 4, "+", 5)
}
func TestCallExpressionParameterParsing(t *testing.T) {
tests := []struct {
input string
expectedIdent string
expectedArgs []string
}{
{
input: "add();",
expectedIdent: "add",
expectedArgs: []string{},
},
{
input: "add(1);",
expectedIdent: "add",
expectedArgs: []string{"1"},
},
{
input: "add(1, 2 * 3, 4 + 5);",
expectedIdent: "add",
expectedArgs: []string{"1", "(2 * 3)", "(4 + 5)"},
},
}
for _, tt := range tests {
l := lexer.New(tt.input)
p := New(l)
program := p.ParseProgram()
checkParserErrors(t, p)
stmt := program.Statements[0].(*ast.ExpressionStatement)
exp, ok := stmt.Expression.(*ast.CallExpression)
if !ok {
t.Fatalf("stmt.Expression is not ast.CallExpression. got=%T",
stmt.Expression)
}
if !testIdentifier(t, exp.Function, tt.expectedIdent) {
return
}
if len(exp.Arguments) != len(tt.expectedArgs) {
t.Fatalf("wrong number of arguments. want=%d, got=%d",
len(tt.expectedArgs), len(exp.Arguments))
}
for i, arg := range tt.expectedArgs {
if exp.Arguments[i].String() != arg {
t.Errorf("argument %d wrong. want=%q, got=%q", i,
arg, exp.Arguments[i].String())
}
}
}
}
func testLetStatement(t *testing.T, s ast.Statement, name string) bool {
if s.TokenLiteral() != "let" {
t.Errorf("s.TokenLiteral not 'let'. got=%q", s.TokenLiteral())
return false
}
letStmt, ok := s.(*ast.LetStatement)
if !ok {
t.Errorf("s not *ast.LetStatement. got=%T", s)
return false
}
if letStmt.Name.Value != name {
t.Errorf("letStmt.Name.Value not '%s'. got=%s", name, letStmt.Name.Value)
return false
}
if letStmt.Name.TokenLiteral() != name {
t.Errorf("letStmt.Name.TokenLiteral() not '%s'. got=%s",
name, letStmt.Name.TokenLiteral())
return false
}
return true
}
func testInfixExpression(t *testing.T, exp ast.Expression, left interface{},
operator string, right interface{}) bool {
opExp, ok := exp.(*ast.InfixExpression)
if !ok {
t.Errorf("exp is not ast.InfixExpression. got=%T(%s)", exp, exp)
return false
}
if !testLiteralExpression(t, opExp.Left, left) {
return false
}
if opExp.Operator != operator {
t.Errorf("exp.Operator is not '%s'. got=%q", operator, opExp.Operator)
return false
}
if !testLiteralExpression(t, opExp.Right, right) {
return false
}
return true
}
func testLiteralExpression(
t *testing.T,
exp ast.Expression,
expected interface{},
) bool {
switch v := expected.(type) {
case int:
return testIntegerLiteral(t, exp, int64(v))
case int64:
return testIntegerLiteral(t, exp, v)
case string:
return testIdentifier(t, exp, v)
case bool:
return testBooleanLiteral(t, exp, v)
}
t.Errorf("type of exp not handled. got=%T", exp)
return false
}
func testIntegerLiteral(t *testing.T, il ast.Expression, value int64) bool {
integ, ok := il.(*ast.IntegerLiteral)
if !ok {
t.Errorf("il not *ast.IntegerLiteral. got=%T", il)
return false
}
if integ.Value != value {
t.Errorf("integ.Value not %d. got=%d", value, integ.Value)
return false
}
if integ.TokenLiteral() != fmt.Sprintf("%d", value) {
t.Errorf("integ.TokenLiteral not %d. got=%s", value,
integ.TokenLiteral())
return false
}
return true
}
func testIdentifier(t *testing.T, exp ast.Expression, value string) bool {
ident, ok := exp.(*ast.Identifier)
if !ok {
t.Errorf("exp not *ast.Identifier. got=%T", exp)
return false
}
if ident.Value != value {
t.Errorf("ident.Value not %s. got=%s", value, ident.Value)
return false
}
if ident.TokenLiteral() != value {
t.Errorf("ident.TokenLiteral not %s. got=%s", value,
ident.TokenLiteral())
return false
}
return true
}
func testBooleanLiteral(t *testing.T, exp ast.Expression, value bool) bool {
bo, ok := exp.(*ast.Boolean)
if !ok {
t.Errorf("exp not *ast.Boolean. got=%T", exp)
return false
}
if bo.Value != value {
t.Errorf("bo.Value not %t. got=%t", value, bo.Value)
return false
}
if bo.TokenLiteral() != fmt.Sprintf("%t", value) {
t.Errorf("bo.TokenLiteral not %t. got=%s",
value, bo.TokenLiteral())
return false
}
return true
}
func checkParserErrors(t *testing.T, p *Parser) {
errors := p.Errors()
if len(errors) == 0 {
return
}
t.Errorf("parser has %d errors", len(errors))
for _, msg := range errors {
t.Errorf("parser error: %q", msg)
}
t.FailNow()
}

View file

@ -0,0 +1,32 @@
package parser
import (
"fmt"
"strings"
)
var traceLevel int = 0
const traceIdentPlaceholder string = "\t"
func identLevel() string {
return strings.Repeat(traceIdentPlaceholder, traceLevel-1)
}
func tracePrint(fs string) {
fmt.Printf("%s%s\n", identLevel(), fs)
}
func incIdent() { traceLevel = traceLevel + 1 }
func decIdent() { traceLevel = traceLevel - 1 }
func trace(msg string) string {
incIdent()
tracePrint("BEGIN " + msg)
return msg
}
func untrace(msg string) {
tracePrint("END " + msg)
decIdent()
}

View file

@ -0,0 +1,64 @@
package repl
import (
"bufio"
"fmt"
"io"
"monkey/evaluator"
"monkey/lexer"
"monkey/object"
"monkey/parser"
)
const PROMPT = ">> "
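// Start reads input a line at a time, parses and evaluates it against a single
// Environment so that bindings persist between entries, and prints either the
// inspected result or the parser errors.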
func Start(in io.Reader, out io.Writer) {
scanner := bufio.NewScanner(in)
env := object.NewEnvironment()
for {
fmt.Fprint(out, PROMPT)
scanned := scanner.Scan()
if !scanned {
return
}
line := scanner.Text()
l := lexer.New(line)
p := parser.New(l)
program := p.ParseProgram()
if len(p.Errors()) != 0 {
printParserErrors(out, p.Errors())
continue
}
evaluated := evaluator.Eval(program, env)
if evaluated != nil {
io.WriteString(out, evaluated.Inspect())
io.WriteString(out, "\n")
}
}
}
const MONKEY_FACE = ` __,__
.--. .-" "-. .--.
/ .. \/ .-. .-. \/ .. \
| | '| / Y \ |' | |
| \ \ \ 0 | 0 / / / |
\ '- ,\.-"""""""-./, -' /
''-' /_ ^ ^ _\ '-''
| \._ _./ |
\ \ '~' / /
'._ '-=-' _.'
'-----'
`
func printParserErrors(out io.Writer, errors []string) {
io.WriteString(out, MONKEY_FACE)
io.WriteString(out, "Woops! We ran into some monkey business here!\n")
io.WriteString(out, " parser errors:\n")
for _, msg := range errors {
io.WriteString(out, "\t"+msg+"\n")
}
}

View file

@ -0,0 +1,66 @@
package token
type TokenType string
const (
ILLEGAL = "ILLEGAL"
EOF = "EOF"
// Identifiers + literals
IDENT = "IDENT" // add, foobar, x, y, ...
INT = "INT" // 1343456
// Operators
ASSIGN = "="
PLUS = "+"
MINUS = "-"
BANG = "!"
ASTERISK = "*"
SLASH = "/"
LT = "<"
GT = ">"
EQ = "=="
NOT_EQ = "!="
// Delimiters
COMMA = ","
SEMICOLON = ";"
LPAREN = "("
RPAREN = ")"
LBRACE = "{"
RBRACE = "}"
// Keywords
FUNCTION = "FUNCTION"
LET = "LET"
TRUE = "TRUE"
FALSE = "FALSE"
IF = "IF"
ELSE = "ELSE"
RETURN = "RETURN"
)
type Token struct {
Type TokenType
Literal string
}
var keywords = map[string]TokenType{
"fn": FUNCTION,
"let": LET,
"true": TRUE,
"false": FALSE,
"if": IF,
"else": ELSE,
"return": RETURN,
}
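// LookupIdent returns the keyword token type if ident is a reserved word,
// otherwise IDENT.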
func LookupIdent(ident string) TokenType {
if tok, ok := keywords[ident]; ok {
return tok
}
return IDENT
}

1
waiig_code_1.7/04/.envrc Normal file
View file

@ -0,0 +1 @@
export GOPATH=$(pwd)

View file

@ -0,0 +1,339 @@
package ast
import (
"bytes"
"monkey/token"
"strings"
)
// The base Node interface
type Node interface {
TokenLiteral() string
String() string
}
// All statement nodes implement this
type Statement interface {
Node
statementNode()
}
// All expression nodes implement this
type Expression interface {
Node
expressionNode()
}
type Program struct {
Statements []Statement
}
func (p *Program) TokenLiteral() string {
if len(p.Statements) > 0 {
return p.Statements[0].TokenLiteral()
} else {
return ""
}
}
func (p *Program) String() string {
var out bytes.Buffer
for _, s := range p.Statements {
out.WriteString(s.String())
}
return out.String()
}
// Statements
type LetStatement struct {
Token token.Token // the token.LET token
Name *Identifier
Value Expression
}
func (ls *LetStatement) statementNode() {}
func (ls *LetStatement) TokenLiteral() string { return ls.Token.Literal }
func (ls *LetStatement) String() string {
var out bytes.Buffer
out.WriteString(ls.TokenLiteral() + " ")
out.WriteString(ls.Name.String())
out.WriteString(" = ")
if ls.Value != nil {
out.WriteString(ls.Value.String())
}
out.WriteString(";")
return out.String()
}
type ReturnStatement struct {
Token token.Token // the 'return' token
ReturnValue Expression
}
func (rs *ReturnStatement) statementNode() {}
func (rs *ReturnStatement) TokenLiteral() string { return rs.Token.Literal }
func (rs *ReturnStatement) String() string {
var out bytes.Buffer
out.WriteString(rs.TokenLiteral() + " ")
if rs.ReturnValue != nil {
out.WriteString(rs.ReturnValue.String())
}
out.WriteString(";")
return out.String()
}
type ExpressionStatement struct {
Token token.Token // the first token of the expression
Expression Expression
}
func (es *ExpressionStatement) statementNode() {}
func (es *ExpressionStatement) TokenLiteral() string { return es.Token.Literal }
func (es *ExpressionStatement) String() string {
if es.Expression != nil {
return es.Expression.String()
}
return ""
}
type BlockStatement struct {
Token token.Token // the { token
Statements []Statement
}
func (bs *BlockStatement) statementNode() {}
func (bs *BlockStatement) TokenLiteral() string { return bs.Token.Literal }
func (bs *BlockStatement) String() string {
var out bytes.Buffer
for _, s := range bs.Statements {
out.WriteString(s.String())
}
return out.String()
}
// Expressions
type Identifier struct {
Token token.Token // the token.IDENT token
Value string
}
func (i *Identifier) expressionNode() {}
func (i *Identifier) TokenLiteral() string { return i.Token.Literal }
func (i *Identifier) String() string { return i.Value }
type Boolean struct {
Token token.Token
Value bool
}
func (b *Boolean) expressionNode() {}
func (b *Boolean) TokenLiteral() string { return b.Token.Literal }
func (b *Boolean) String() string { return b.Token.Literal }
type IntegerLiteral struct {
Token token.Token
Value int64
}
func (il *IntegerLiteral) expressionNode() {}
func (il *IntegerLiteral) TokenLiteral() string { return il.Token.Literal }
func (il *IntegerLiteral) String() string { return il.Token.Literal }
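// The prefix, infix and index expression String methods below add explicit
// parentheses; the parser's operator-precedence tests compare program.String()
// against exactly this representation.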
type PrefixExpression struct {
Token token.Token // The prefix token, e.g. !
Operator string
Right Expression
}
func (pe *PrefixExpression) expressionNode() {}
func (pe *PrefixExpression) TokenLiteral() string { return pe.Token.Literal }
func (pe *PrefixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(pe.Operator)
out.WriteString(pe.Right.String())
out.WriteString(")")
return out.String()
}
type InfixExpression struct {
Token token.Token // The operator token, e.g. +
Left Expression
Operator string
Right Expression
}
func (ie *InfixExpression) expressionNode() {}
func (ie *InfixExpression) TokenLiteral() string { return ie.Token.Literal }
func (ie *InfixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(ie.Left.String())
out.WriteString(" " + ie.Operator + " ")
out.WriteString(ie.Right.String())
out.WriteString(")")
return out.String()
}
type IfExpression struct {
Token token.Token // The 'if' token
Condition Expression
Consequence *BlockStatement
Alternative *BlockStatement
}
func (ie *IfExpression) expressionNode() {}
func (ie *IfExpression) TokenLiteral() string { return ie.Token.Literal }
func (ie *IfExpression) String() string {
var out bytes.Buffer
out.WriteString("if")
out.WriteString(ie.Condition.String())
out.WriteString(" ")
out.WriteString(ie.Consequence.String())
if ie.Alternative != nil {
out.WriteString("else ")
out.WriteString(ie.Alternative.String())
}
return out.String()
}
type FunctionLiteral struct {
Token token.Token // The 'fn' token
Parameters []*Identifier
Body *BlockStatement
}
func (fl *FunctionLiteral) expressionNode() {}
func (fl *FunctionLiteral) TokenLiteral() string { return fl.Token.Literal }
func (fl *FunctionLiteral) String() string {
var out bytes.Buffer
params := []string{}
for _, p := range fl.Parameters {
params = append(params, p.String())
}
out.WriteString(fl.TokenLiteral())
out.WriteString("(")
out.WriteString(strings.Join(params, ", "))
out.WriteString(") ")
out.WriteString(fl.Body.String())
return out.String()
}
type CallExpression struct {
Token token.Token // The '(' token
Function Expression // Identifier or FunctionLiteral
Arguments []Expression
}
func (ce *CallExpression) expressionNode() {}
func (ce *CallExpression) TokenLiteral() string { return ce.Token.Literal }
func (ce *CallExpression) String() string {
var out bytes.Buffer
args := []string{}
for _, a := range ce.Arguments {
args = append(args, a.String())
}
out.WriteString(ce.Function.String())
out.WriteString("(")
out.WriteString(strings.Join(args, ", "))
out.WriteString(")")
return out.String()
}
type StringLiteral struct {
Token token.Token
Value string
}
func (sl *StringLiteral) expressionNode() {}
func (sl *StringLiteral) TokenLiteral() string { return sl.Token.Literal }
func (sl *StringLiteral) String() string { return sl.Token.Literal }
type ArrayLiteral struct {
Token token.Token // the '[' token
Elements []Expression
}
func (al *ArrayLiteral) expressionNode() {}
func (al *ArrayLiteral) TokenLiteral() string { return al.Token.Literal }
func (al *ArrayLiteral) String() string {
var out bytes.Buffer
elements := []string{}
for _, el := range al.Elements {
elements = append(elements, el.String())
}
out.WriteString("[")
out.WriteString(strings.Join(elements, ", "))
out.WriteString("]")
return out.String()
}
type IndexExpression struct {
Token token.Token // The [ token
Left Expression
Index Expression
}
func (ie *IndexExpression) expressionNode() {}
func (ie *IndexExpression) TokenLiteral() string { return ie.Token.Literal }
func (ie *IndexExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(ie.Left.String())
out.WriteString("[")
out.WriteString(ie.Index.String())
out.WriteString("])")
return out.String()
}
type HashLiteral struct {
Token token.Token // the '{' token
Pairs map[Expression]Expression
}
func (hl *HashLiteral) expressionNode() {}
func (hl *HashLiteral) TokenLiteral() string { return hl.Token.Literal }
func (hl *HashLiteral) String() string {
var out bytes.Buffer
pairs := []string{}
for key, value := range hl.Pairs {
pairs = append(pairs, key.String()+":"+value.String())
}
out.WriteString("{")
out.WriteString(strings.Join(pairs, ", "))
out.WriteString("}")
return out.String()
}

View file

@ -0,0 +1,28 @@
package ast
import (
"monkey/token"
"testing"
)
func TestString(t *testing.T) {
program := &Program{
Statements: []Statement{
&LetStatement{
Token: token.Token{Type: token.LET, Literal: "let"},
Name: &Identifier{
Token: token.Token{Type: token.IDENT, Literal: "myVar"},
Value: "myVar",
},
Value: &Identifier{
Token: token.Token{Type: token.IDENT, Literal: "anotherVar"},
Value: "anotherVar",
},
},
},
}
if program.String() != "let myVar = anotherVar;" {
t.Errorf("program.String() wrong. got=%q", program.String())
}
}

View file

@ -0,0 +1,117 @@
package evaluator
import (
"fmt"
"monkey/object"
)
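// builtins maps the names of Monkey's built-in functions to their Go
// implementations; most of them validate their arguments and report misuse as
// *object.Error values rather than crashing.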
var builtins = map[string]*object.Builtin{
"len": &object.Builtin{Fn: func(args ...object.Object) object.Object {
if len(args) != 1 {
return newError("wrong number of arguments. got=%d, want=1",
len(args))
}
switch arg := args[0].(type) {
case *object.Array:
return &object.Integer{Value: int64(len(arg.Elements))}
case *object.String:
return &object.Integer{Value: int64(len(arg.Value))}
default:
return newError("argument to `len` not supported, got %s",
args[0].Type())
}
},
},
"puts": &object.Builtin{
Fn: func(args ...object.Object) object.Object {
for _, arg := range args {
fmt.Println(arg.Inspect())
}
return NULL
},
},
"first": &object.Builtin{
Fn: func(args ...object.Object) object.Object {
if len(args) != 1 {
return newError("wrong number of arguments. got=%d, want=1",
len(args))
}
if args[0].Type() != object.ARRAY_OBJ {
return newError("argument to `first` must be ARRAY, got %s",
args[0].Type())
}
arr := args[0].(*object.Array)
if len(arr.Elements) > 0 {
return arr.Elements[0]
}
return NULL
},
},
"last": &object.Builtin{
Fn: func(args ...object.Object) object.Object {
if len(args) != 1 {
return newError("wrong number of arguments. got=%d, want=1",
len(args))
}
if args[0].Type() != object.ARRAY_OBJ {
return newError("argument to `last` must be ARRAY, got %s",
args[0].Type())
}
arr := args[0].(*object.Array)
length := len(arr.Elements)
if length > 0 {
return arr.Elements[length-1]
}
return NULL
},
},
"rest": &object.Builtin{
Fn: func(args ...object.Object) object.Object {
if len(args) != 1 {
return newError("wrong number of arguments. got=%d, want=1",
len(args))
}
if args[0].Type() != object.ARRAY_OBJ {
return newError("argument to `rest` must be ARRAY, got %s",
args[0].Type())
}
arr := args[0].(*object.Array)
length := len(arr.Elements)
if length > 0 {
newElements := make([]object.Object, length-1, length-1)
copy(newElements, arr.Elements[1:length])
return &object.Array{Elements: newElements}
}
return NULL
},
},
"push": &object.Builtin{
Fn: func(args ...object.Object) object.Object {
if len(args) != 2 {
return newError("wrong number of arguments. got=%d, want=2",
len(args))
}
if args[0].Type() != object.ARRAY_OBJ {
return newError("argument to `push` must be ARRAY, got %s",
args[0].Type())
}
arr := args[0].(*object.Array)
length := len(arr.Elements)
newElements := make([]object.Object, length+1, length+1)
copy(newElements, arr.Elements)
newElements[length] = args[1]
return &object.Array{Elements: newElements}
},
},
}

View file

@ -0,0 +1,442 @@
package evaluator
import (
"fmt"
"monkey/ast"
"monkey/object"
)
var (
NULL = &object.Null{}
TRUE = &object.Boolean{Value: true}
FALSE = &object.Boolean{Value: false}
)
func Eval(node ast.Node, env *object.Environment) object.Object {
switch node := node.(type) {
// Statements
case *ast.Program:
return evalProgram(node, env)
case *ast.BlockStatement:
return evalBlockStatement(node, env)
case *ast.ExpressionStatement:
return Eval(node.Expression, env)
case *ast.ReturnStatement:
val := Eval(node.ReturnValue, env)
if isError(val) {
return val
}
return &object.ReturnValue{Value: val}
case *ast.LetStatement:
val := Eval(node.Value, env)
if isError(val) {
return val
}
env.Set(node.Name.Value, val)
// Expressions
case *ast.IntegerLiteral:
return &object.Integer{Value: node.Value}
case *ast.StringLiteral:
return &object.String{Value: node.Value}
case *ast.Boolean:
return nativeBoolToBooleanObject(node.Value)
case *ast.PrefixExpression:
right := Eval(node.Right, env)
if isError(right) {
return right
}
return evalPrefixExpression(node.Operator, right)
case *ast.InfixExpression:
left := Eval(node.Left, env)
if isError(left) {
return left
}
right := Eval(node.Right, env)
if isError(right) {
return right
}
return evalInfixExpression(node.Operator, left, right)
case *ast.IfExpression:
return evalIfExpression(node, env)
case *ast.Identifier:
return evalIdentifier(node, env)
case *ast.FunctionLiteral:
params := node.Parameters
body := node.Body
return &object.Function{Parameters: params, Env: env, Body: body}
case *ast.CallExpression:
function := Eval(node.Function, env)
if isError(function) {
return function
}
args := evalExpressions(node.Arguments, env)
if len(args) == 1 && isError(args[0]) {
return args[0]
}
return applyFunction(function, args)
case *ast.ArrayLiteral:
elements := evalExpressions(node.Elements, env)
if len(elements) == 1 && isError(elements[0]) {
return elements[0]
}
return &object.Array{Elements: elements}
case *ast.IndexExpression:
left := Eval(node.Left, env)
if isError(left) {
return left
}
index := Eval(node.Index, env)
if isError(index) {
return index
}
return evalIndexExpression(left, index)
case *ast.HashLiteral:
return evalHashLiteral(node, env)
}
return nil
}
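// evalProgram unwraps *object.ReturnValue at the top level, while
// evalBlockStatement below returns the wrapper untouched, so a return inside
// nested blocks keeps bubbling up until it reaches the enclosing function or
// the program itself.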
func evalProgram(program *ast.Program, env *object.Environment) object.Object {
var result object.Object
for _, statement := range program.Statements {
result = Eval(statement, env)
switch result := result.(type) {
case *object.ReturnValue:
return result.Value
case *object.Error:
return result
}
}
return result
}
func evalBlockStatement(
block *ast.BlockStatement,
env *object.Environment,
) object.Object {
var result object.Object
for _, statement := range block.Statements {
result = Eval(statement, env)
if result != nil {
rt := result.Type()
if rt == object.RETURN_VALUE_OBJ || rt == object.ERROR_OBJ {
return result
}
}
}
return result
}
func nativeBoolToBooleanObject(input bool) *object.Boolean {
if input {
return TRUE
}
return FALSE
}
func evalPrefixExpression(operator string, right object.Object) object.Object {
switch operator {
case "!":
return evalBangOperatorExpression(right)
case "-":
return evalMinusPrefixOperatorExpression(right)
default:
return newError("unknown operator: %s%s", operator, right.Type())
}
}
func evalInfixExpression(
operator string,
left, right object.Object,
) object.Object {
switch {
case left.Type() == object.INTEGER_OBJ && right.Type() == object.INTEGER_OBJ:
return evalIntegerInfixExpression(operator, left, right)
case left.Type() == object.STRING_OBJ && right.Type() == object.STRING_OBJ:
return evalStringInfixExpression(operator, left, right)
case operator == "==":
return nativeBoolToBooleanObject(left == right)
case operator == "!=":
return nativeBoolToBooleanObject(left != right)
case left.Type() != right.Type():
return newError("type mismatch: %s %s %s",
left.Type(), operator, right.Type())
default:
return newError("unknown operator: %s %s %s",
left.Type(), operator, right.Type())
}
}
func evalBangOperatorExpression(right object.Object) object.Object {
switch right {
case TRUE:
return FALSE
case FALSE:
return TRUE
case NULL:
return TRUE
default:
return FALSE
}
}
func evalMinusPrefixOperatorExpression(right object.Object) object.Object {
if right.Type() != object.INTEGER_OBJ {
return newError("unknown operator: -%s", right.Type())
}
value := right.(*object.Integer).Value
return &object.Integer{Value: -value}
}
func evalIntegerInfixExpression(
operator string,
left, right object.Object,
) object.Object {
leftVal := left.(*object.Integer).Value
rightVal := right.(*object.Integer).Value
switch operator {
case "+":
return &object.Integer{Value: leftVal + rightVal}
case "-":
return &object.Integer{Value: leftVal - rightVal}
case "*":
return &object.Integer{Value: leftVal * rightVal}
case "/":
return &object.Integer{Value: leftVal / rightVal}
case "<":
return nativeBoolToBooleanObject(leftVal < rightVal)
case ">":
return nativeBoolToBooleanObject(leftVal > rightVal)
case "==":
return nativeBoolToBooleanObject(leftVal == rightVal)
case "!=":
return nativeBoolToBooleanObject(leftVal != rightVal)
default:
return newError("unknown operator: %s %s %s",
left.Type(), operator, right.Type())
}
}
func evalStringInfixExpression(
operator string,
left, right object.Object,
) object.Object {
if operator != "+" {
return newError("unknown operator: %s %s %s",
left.Type(), operator, right.Type())
}
leftVal := left.(*object.String).Value
rightVal := right.(*object.String).Value
return &object.String{Value: leftVal + rightVal}
}
func evalIfExpression(
ie *ast.IfExpression,
env *object.Environment,
) object.Object {
condition := Eval(ie.Condition, env)
if isError(condition) {
return condition
}
if isTruthy(condition) {
return Eval(ie.Consequence, env)
} else if ie.Alternative != nil {
return Eval(ie.Alternative, env)
} else {
return NULL
}
}
func evalIdentifier(
node *ast.Identifier,
env *object.Environment,
) object.Object {
if val, ok := env.Get(node.Value); ok {
return val
}
if builtin, ok := builtins[node.Value]; ok {
return builtin
}
return newError("identifier not found: " + node.Value)
}
func isTruthy(obj object.Object) bool {
switch obj {
case NULL:
return false
case TRUE:
return true
case FALSE:
return false
default:
return true
}
}
func newError(format string, a ...interface{}) *object.Error {
return &object.Error{Message: fmt.Sprintf(format, a...)}
}
func isError(obj object.Object) bool {
if obj != nil {
return obj.Type() == object.ERROR_OBJ
}
return false
}
func evalExpressions(
exps []ast.Expression,
env *object.Environment,
) []object.Object {
var result []object.Object
for _, e := range exps {
evaluated := Eval(e, env)
if isError(evaluated) {
return []object.Object{evaluated}
}
result = append(result, evaluated)
}
return result
}
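// applyFunction runs a user-defined function in a new environment enclosed by
// the environment the function was defined in (fn.Env), which is what gives
// Monkey functions their closure semantics; builtins are called directly.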
func applyFunction(fn object.Object, args []object.Object) object.Object {
switch fn := fn.(type) {
case *object.Function:
extendedEnv := extendFunctionEnv(fn, args)
evaluated := Eval(fn.Body, extendedEnv)
return unwrapReturnValue(evaluated)
case *object.Builtin:
return fn.Fn(args...)
default:
return newError("not a function: %s", fn.Type())
}
}
func extendFunctionEnv(
fn *object.Function,
args []object.Object,
) *object.Environment {
env := object.NewEnclosedEnvironment(fn.Env)
for paramIdx, param := range fn.Parameters {
env.Set(param.Value, args[paramIdx])
}
return env
}
func unwrapReturnValue(obj object.Object) object.Object {
if returnValue, ok := obj.(*object.ReturnValue); ok {
return returnValue.Value
}
return obj
}
func evalIndexExpression(left, index object.Object) object.Object {
switch {
case left.Type() == object.ARRAY_OBJ && index.Type() == object.INTEGER_OBJ:
return evalArrayIndexExpression(left, index)
case left.Type() == object.HASH_OBJ:
return evalHashIndexExpression(left, index)
default:
return newError("index operator not supported: %s", left.Type())
}
}
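// Negative or out-of-range array indices deliberately evaluate to NULL rather
// than to an error object.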
func evalArrayIndexExpression(array, index object.Object) object.Object {
arrayObject := array.(*object.Array)
idx := index.(*object.Integer).Value
max := int64(len(arrayObject.Elements) - 1)
if idx < 0 || idx > max {
return NULL
}
return arrayObject.Elements[idx]
}
func evalHashLiteral(
node *ast.HashLiteral,
env *object.Environment,
) object.Object {
pairs := make(map[object.HashKey]object.HashPair)
for keyNode, valueNode := range node.Pairs {
key := Eval(keyNode, env)
if isError(key) {
return key
}
hashKey, ok := key.(object.Hashable)
if !ok {
return newError("unusable as hash key: %s", key.Type())
}
value := Eval(valueNode, env)
if isError(value) {
return value
}
hashed := hashKey.HashKey()
pairs[hashed] = object.HashPair{Key: key, Value: value}
}
return &object.Hash{Pairs: pairs}
}
func evalHashIndexExpression(hash, index object.Object) object.Object {
hashObject := hash.(*object.Hash)
key, ok := index.(object.Hashable)
if !ok {
return newError("unusable as hash key: %s", index.Type())
}
pair, ok := hashObject.Pairs[key.HashKey()]
if !ok {
return NULL
}
return pair.Value
}

View file

@ -0,0 +1,629 @@
package evaluator
import (
"monkey/lexer"
"monkey/object"
"monkey/parser"
"testing"
)
func TestEvalIntegerExpression(t *testing.T) {
tests := []struct {
input string
expected int64
}{
{"5", 5},
{"10", 10},
{"-5", -5},
{"-10", -10},
{"5 + 5 + 5 + 5 - 10", 10},
{"2 * 2 * 2 * 2 * 2", 32},
{"-50 + 100 + -50", 0},
{"5 * 2 + 10", 20},
{"5 + 2 * 10", 25},
{"20 + 2 * -10", 0},
{"50 / 2 * 2 + 10", 60},
{"2 * (5 + 10)", 30},
{"3 * 3 * 3 + 10", 37},
{"3 * (3 * 3) + 10", 37},
{"(5 + 10 * 2 + 15 / 3) * 2 + -10", 50},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
testIntegerObject(t, evaluated, tt.expected)
}
}
func TestEvalBooleanExpression(t *testing.T) {
tests := []struct {
input string
expected bool
}{
{"true", true},
{"false", false},
{"1 < 2", true},
{"1 > 2", false},
{"1 < 1", false},
{"1 > 1", false},
{"1 == 1", true},
{"1 != 1", false},
{"1 == 2", false},
{"1 != 2", true},
{"true == true", true},
{"false == false", true},
{"true == false", false},
{"true != false", true},
{"false != true", true},
{"(1 < 2) == true", true},
{"(1 < 2) == false", false},
{"(1 > 2) == true", false},
{"(1 > 2) == false", true},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
testBooleanObject(t, evaluated, tt.expected)
}
}
func TestBangOperator(t *testing.T) {
tests := []struct {
input string
expected bool
}{
{"!true", false},
{"!false", true},
{"!5", false},
{"!!true", true},
{"!!false", false},
{"!!5", true},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
testBooleanObject(t, evaluated, tt.expected)
}
}
func TestIfElseExpressions(t *testing.T) {
tests := []struct {
input string
expected interface{}
}{
{"if (true) { 10 }", 10},
{"if (false) { 10 }", nil},
{"if (1) { 10 }", 10},
{"if (1 < 2) { 10 }", 10},
{"if (1 > 2) { 10 }", nil},
{"if (1 > 2) { 10 } else { 20 }", 20},
{"if (1 < 2) { 10 } else { 20 }", 10},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
integer, ok := tt.expected.(int)
if ok {
testIntegerObject(t, evaluated, int64(integer))
} else {
testNullObject(t, evaluated)
}
}
}
func TestReturnStatements(t *testing.T) {
tests := []struct {
input string
expected int64
}{
{"return 10;", 10},
{"return 10; 9;", 10},
{"return 2 * 5; 9;", 10},
{"9; return 2 * 5; 9;", 10},
{"if (10 > 1) { return 10; }", 10},
{
`
if (10 > 1) {
if (10 > 1) {
return 10;
}
return 1;
}
`,
10,
},
{
`
let f = fn(x) {
return x;
x + 10;
};
f(10);`,
10,
},
{
`
let f = fn(x) {
let result = x + 10;
return result;
return 10;
};
f(10);`,
20,
},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
testIntegerObject(t, evaluated, tt.expected)
}
}
func TestErrorHandling(t *testing.T) {
tests := []struct {
input string
expectedMessage string
}{
{
"5 + true;",
"type mismatch: INTEGER + BOOLEAN",
},
{
"5 + true; 5;",
"type mismatch: INTEGER + BOOLEAN",
},
{
"-true",
"unknown operator: -BOOLEAN",
},
{
"true + false;",
"unknown operator: BOOLEAN + BOOLEAN",
},
{
"true + false + true + false;",
"unknown operator: BOOLEAN + BOOLEAN",
},
{
"5; true + false; 5",
"unknown operator: BOOLEAN + BOOLEAN",
},
{
`"Hello" - "World"`,
"unknown operator: STRING - STRING",
},
{
"if (10 > 1) { true + false; }",
"unknown operator: BOOLEAN + BOOLEAN",
},
{
`
if (10 > 1) {
if (10 > 1) {
return true + false;
}
return 1;
}
`,
"unknown operator: BOOLEAN + BOOLEAN",
},
{
"foobar",
"identifier not found: foobar",
},
{
`{"name": "Monkey"}[fn(x) { x }];`,
"unusable as hash key: FUNCTION",
},
{
`999[1]`,
"index operator not supported: INTEGER",
},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
errObj, ok := evaluated.(*object.Error)
if !ok {
t.Errorf("no error object returned. got=%T(%+v)",
evaluated, evaluated)
continue
}
if errObj.Message != tt.expectedMessage {
t.Errorf("wrong error message. expected=%q, got=%q",
tt.expectedMessage, errObj.Message)
}
}
}
func TestLetStatements(t *testing.T) {
tests := []struct {
input string
expected int64
}{
{"let a = 5; a;", 5},
{"let a = 5 * 5; a;", 25},
{"let a = 5; let b = a; b;", 5},
{"let a = 5; let b = a; let c = a + b + 5; c;", 15},
}
for _, tt := range tests {
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestFunctionObject(t *testing.T) {
input := "fn(x) { x + 2; };"
evaluated := testEval(input)
fn, ok := evaluated.(*object.Function)
if !ok {
t.Fatalf("object is not Function. got=%T (%+v)", evaluated, evaluated)
}
if len(fn.Parameters) != 1 {
t.Fatalf("function has wrong parameters. Parameters=%+v",
fn.Parameters)
}
if fn.Parameters[0].String() != "x" {
t.Fatalf("parameter is not 'x'. got=%q", fn.Parameters[0])
}
expectedBody := "(x + 2)"
if fn.Body.String() != expectedBody {
t.Fatalf("body is not %q. got=%q", expectedBody, fn.Body.String())
}
}
func TestFunctionApplication(t *testing.T) {
tests := []struct {
input string
expected int64
}{
{"let identity = fn(x) { x; }; identity(5);", 5},
{"let identity = fn(x) { return x; }; identity(5);", 5},
{"let double = fn(x) { x * 2; }; double(5);", 10},
{"let add = fn(x, y) { x + y; }; add(5, 5);", 10},
{"let add = fn(x, y) { x + y; }; add(5 + 5, add(5, 5));", 20},
{"fn(x) { x; }(5)", 5},
}
for _, tt := range tests {
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestEnclosingEnvironments(t *testing.T) {
input := `
let first = 10;
let second = 10;
let third = 10;
let ourFunction = fn(first) {
let second = 20;
first + second + third;
};
ourFunction(20) + first + second;`
testIntegerObject(t, testEval(input), 70)
}
func TestClosures(t *testing.T) {
input := `
let newAdder = fn(x) {
fn(y) { x + y };
};
let addTwo = newAdder(2);
addTwo(2);`
testIntegerObject(t, testEval(input), 4)
}
func TestStringLiteral(t *testing.T) {
input := `"Hello World!"`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok {
t.Fatalf("object is not String. got=%T (%+v)", evaluated, evaluated)
}
if str.Value != "Hello World!" {
t.Errorf("String has wrong value. got=%q", str.Value)
}
}
func TestStringConcatenation(t *testing.T) {
input := `"Hello" + " " + "World!"`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok {
t.Fatalf("object is not String. got=%T (%+v)", evaluated, evaluated)
}
if str.Value != "Hello World!" {
t.Errorf("String has wrong value. got=%q", str.Value)
}
}
func TestBuiltinFunctions(t *testing.T) {
tests := []struct {
input string
expected interface{}
}{
{`len("")`, 0},
{`len("four")`, 4},
{`len("hello world")`, 11},
{`len(1)`, "argument to `len` not supported, got INTEGER"},
{`len("one", "two")`, "wrong number of arguments. got=2, want=1"},
{`len([1, 2, 3])`, 3},
{`len([])`, 0},
{`puts("hello", "world!")`, nil},
{`first([1, 2, 3])`, 1},
{`first([])`, nil},
{`first(1)`, "argument to `first` must be ARRAY, got INTEGER"},
{`last([1, 2, 3])`, 3},
{`last([])`, nil},
{`last(1)`, "argument to `last` must be ARRAY, got INTEGER"},
{`rest([1, 2, 3])`, []int{2, 3}},
{`rest([])`, nil},
{`push([], 1)`, []int{1}},
{`push(1, 1)`, "argument to `push` must be ARRAY, got INTEGER"},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
switch expected := tt.expected.(type) {
case int:
testIntegerObject(t, evaluated, int64(expected))
case nil:
testNullObject(t, evaluated)
case string:
errObj, ok := evaluated.(*object.Error)
if !ok {
t.Errorf("object is not Error. got=%T (%+v)",
evaluated, evaluated)
continue
}
if errObj.Message != expected {
t.Errorf("wrong error message. expected=%q, got=%q",
expected, errObj.Message)
}
case []int:
array, ok := evaluated.(*object.Array)
if !ok {
t.Errorf("obj not Array. got=%T (%+v)", evaluated, evaluated)
continue
}
if len(array.Elements) != len(expected) {
t.Errorf("wrong num of elements. want=%d, got=%d",
len(expected), len(array.Elements))
continue
}
for i, expectedElem := range expected {
testIntegerObject(t, array.Elements[i], int64(expectedElem))
}
}
}
}
func TestArrayLiterals(t *testing.T) {
input := "[1, 2 * 2, 3 + 3]"
evaluated := testEval(input)
result, ok := evaluated.(*object.Array)
if !ok {
t.Fatalf("object is not Array. got=%T (%+v)", evaluated, evaluated)
}
if len(result.Elements) != 3 {
t.Fatalf("array has wrong num of elements. got=%d",
len(result.Elements))
}
testIntegerObject(t, result.Elements[0], 1)
testIntegerObject(t, result.Elements[1], 4)
testIntegerObject(t, result.Elements[2], 6)
}
func TestArrayIndexExpressions(t *testing.T) {
tests := []struct {
input string
expected interface{}
}{
{
"[1, 2, 3][0]",
1,
},
{
"[1, 2, 3][1]",
2,
},
{
"[1, 2, 3][2]",
3,
},
{
"let i = 0; [1][i];",
1,
},
{
"[1, 2, 3][1 + 1];",
3,
},
{
"let myArray = [1, 2, 3]; myArray[2];",
3,
},
{
"let myArray = [1, 2, 3]; myArray[0] + myArray[1] + myArray[2];",
6,
},
{
"let myArray = [1, 2, 3]; let i = myArray[0]; myArray[i]",
2,
},
{
"[1, 2, 3][3]",
nil,
},
{
"[1, 2, 3][-1]",
nil,
},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
integer, ok := tt.expected.(int)
if ok {
testIntegerObject(t, evaluated, int64(integer))
} else {
testNullObject(t, evaluated)
}
}
}
func TestHashLiterals(t *testing.T) {
input := `let two = "two";
{
"one": 10 - 9,
two: 1 + 1,
"thr" + "ee": 6 / 2,
4: 4,
true: 5,
false: 6
}`
evaluated := testEval(input)
result, ok := evaluated.(*object.Hash)
if !ok {
t.Fatalf("Eval didn't return Hash. got=%T (%+v)", evaluated, evaluated)
}
expected := map[object.HashKey]int64{
(&object.String{Value: "one"}).HashKey(): 1,
(&object.String{Value: "two"}).HashKey(): 2,
(&object.String{Value: "three"}).HashKey(): 3,
(&object.Integer{Value: 4}).HashKey(): 4,
TRUE.HashKey(): 5,
FALSE.HashKey(): 6,
}
if len(result.Pairs) != len(expected) {
t.Fatalf("Hash has wrong num of pairs. got=%d", len(result.Pairs))
}
for expectedKey, expectedValue := range expected {
pair, ok := result.Pairs[expectedKey]
if !ok {
t.Errorf("no pair for given key in Pairs")
}
testIntegerObject(t, pair.Value, expectedValue)
}
}
func TestHashIndexExpressions(t *testing.T) {
tests := []struct {
input string
expected interface{}
}{
{
`{"foo": 5}["foo"]`,
5,
},
{
`{"foo": 5}["bar"]`,
nil,
},
{
`let key = "foo"; {"foo": 5}[key]`,
5,
},
{
`{}["foo"]`,
nil,
},
{
`{5: 5}[5]`,
5,
},
{
`{true: 5}[true]`,
5,
},
{
`{false: 5}[false]`,
5,
},
}
for _, tt := range tests {
evaluated := testEval(tt.input)
integer, ok := tt.expected.(int)
if ok {
testIntegerObject(t, evaluated, int64(integer))
} else {
testNullObject(t, evaluated)
}
}
}
func testEval(input string) object.Object {
l := lexer.New(input)
p := parser.New(l)
program := p.ParseProgram()
env := object.NewEnvironment()
return Eval(program, env)
}
func testIntegerObject(t *testing.T, obj object.Object, expected int64) bool {
result, ok := obj.(*object.Integer)
if !ok {
t.Errorf("object is not Integer. got=%T (%+v)", obj, obj)
return false
}
if result.Value != expected {
t.Errorf("object has wrong value. got=%d, want=%d",
result.Value, expected)
return false
}
return true
}
func testBooleanObject(t *testing.T, obj object.Object, expected bool) bool {
result, ok := obj.(*object.Boolean)
if !ok {
t.Errorf("object is not Boolean. got=%T (%+v)", obj, obj)
return false
}
if result.Value != expected {
t.Errorf("object has wrong value. got=%t, want=%t",
result.Value, expected)
return false
}
return true
}
func testNullObject(t *testing.T, obj object.Object) bool {
if obj != NULL {
t.Errorf("object is not NULL. got=%T (%+v)", obj, obj)
return false
}
return true
}

View file

@ -0,0 +1,3 @@
module monkey
go 1.14

View file

@ -0,0 +1,157 @@
package lexer
import "monkey/token"
type Lexer struct {
input string
position int // current position in input (points to current char)
readPosition int // current reading position in input (after current char)
ch byte // current char under examination
}
func New(input string) *Lexer {
l := &Lexer{input: input}
l.readChar()
return l
}
func (l *Lexer) NextToken() token.Token {
var tok token.Token
l.skipWhitespace()
switch l.ch {
case '=':
if l.peekChar() == '=' {
ch := l.ch
l.readChar()
literal := string(ch) + string(l.ch)
tok = token.Token{Type: token.EQ, Literal: literal}
} else {
tok = newToken(token.ASSIGN, l.ch)
}
case '+':
tok = newToken(token.PLUS, l.ch)
case '-':
tok = newToken(token.MINUS, l.ch)
case '!':
if l.peekChar() == '=' {
ch := l.ch
l.readChar()
literal := string(ch) + string(l.ch)
tok = token.Token{Type: token.NOT_EQ, Literal: literal}
} else {
tok = newToken(token.BANG, l.ch)
}
case '/':
tok = newToken(token.SLASH, l.ch)
case '*':
tok = newToken(token.ASTERISK, l.ch)
case '<':
tok = newToken(token.LT, l.ch)
case '>':
tok = newToken(token.GT, l.ch)
case ';':
tok = newToken(token.SEMICOLON, l.ch)
case ':':
tok = newToken(token.COLON, l.ch)
case ',':
tok = newToken(token.COMMA, l.ch)
case '{':
tok = newToken(token.LBRACE, l.ch)
case '}':
tok = newToken(token.RBRACE, l.ch)
case '(':
tok = newToken(token.LPAREN, l.ch)
case ')':
tok = newToken(token.RPAREN, l.ch)
case '"':
tok.Type = token.STRING
tok.Literal = l.readString()
case '[':
tok = newToken(token.LBRACKET, l.ch)
case ']':
tok = newToken(token.RBRACKET, l.ch)
case 0:
tok.Literal = ""
tok.Type = token.EOF
default:
if isLetter(l.ch) {
tok.Literal = l.readIdentifier()
tok.Type = token.LookupIdent(tok.Literal)
return tok
} else if isDigit(l.ch) {
tok.Type = token.INT
tok.Literal = l.readNumber()
return tok
} else {
tok = newToken(token.ILLEGAL, l.ch)
}
}
l.readChar()
return tok
}
func (l *Lexer) skipWhitespace() {
for l.ch == ' ' || l.ch == '\t' || l.ch == '\n' || l.ch == '\r' {
l.readChar()
}
}
func (l *Lexer) readChar() {
if l.readPosition >= len(l.input) {
l.ch = 0
} else {
l.ch = l.input[l.readPosition]
}
l.position = l.readPosition
l.readPosition += 1
}
func (l *Lexer) peekChar() byte {
if l.readPosition >= len(l.input) {
return 0
} else {
return l.input[l.readPosition]
}
}
func (l *Lexer) readIdentifier() string {
position := l.position
for isLetter(l.ch) {
l.readChar()
}
return l.input[position:l.position]
}
func (l *Lexer) readNumber() string {
position := l.position
for isDigit(l.ch) {
l.readChar()
}
return l.input[position:l.position]
}
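// readString consumes everything up to the next '"' or the end of input.
// Escape sequences such as \" or \n are not supported by this lexer.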
func (l *Lexer) readString() string {
position := l.position + 1
for {
l.readChar()
if l.ch == '"' || l.ch == 0 {
break
}
}
return l.input[position:l.position]
}
func isLetter(ch byte) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_'
}
func isDigit(ch byte) bool {
return '0' <= ch && ch <= '9'
}
func newToken(tokenType token.TokenType, ch byte) token.Token {
return token.Token{Type: tokenType, Literal: string(ch)}
}

View file

@ -0,0 +1,143 @@
package lexer
import (
"testing"
"monkey/token"
)
func TestNextToken(t *testing.T) {
input := `let five = 5;
let ten = 10;
let add = fn(x, y) {
x + y;
};
let result = add(five, ten);
!-/*5;
5 < 10 > 5;
if (5 < 10) {
return true;
} else {
return false;
}
10 == 10;
10 != 9;
"foobar"
"foo bar"
[1, 2];
{"foo": "bar"}
`
tests := []struct {
expectedType token.TokenType
expectedLiteral string
}{
{token.LET, "let"},
{token.IDENT, "five"},
{token.ASSIGN, "="},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "ten"},
{token.ASSIGN, "="},
{token.INT, "10"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "add"},
{token.ASSIGN, "="},
{token.FUNCTION, "fn"},
{token.LPAREN, "("},
{token.IDENT, "x"},
{token.COMMA, ","},
{token.IDENT, "y"},
{token.RPAREN, ")"},
{token.LBRACE, "{"},
{token.IDENT, "x"},
{token.PLUS, "+"},
{token.IDENT, "y"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.SEMICOLON, ";"},
{token.LET, "let"},
{token.IDENT, "result"},
{token.ASSIGN, "="},
{token.IDENT, "add"},
{token.LPAREN, "("},
{token.IDENT, "five"},
{token.COMMA, ","},
{token.IDENT, "ten"},
{token.RPAREN, ")"},
{token.SEMICOLON, ";"},
{token.BANG, "!"},
{token.MINUS, "-"},
{token.SLASH, "/"},
{token.ASTERISK, "*"},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.INT, "5"},
{token.LT, "<"},
{token.INT, "10"},
{token.GT, ">"},
{token.INT, "5"},
{token.SEMICOLON, ";"},
{token.IF, "if"},
{token.LPAREN, "("},
{token.INT, "5"},
{token.LT, "<"},
{token.INT, "10"},
{token.RPAREN, ")"},
{token.LBRACE, "{"},
{token.RETURN, "return"},
{token.TRUE, "true"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.ELSE, "else"},
{token.LBRACE, "{"},
{token.RETURN, "return"},
{token.FALSE, "false"},
{token.SEMICOLON, ";"},
{token.RBRACE, "}"},
{token.INT, "10"},
{token.EQ, "=="},
{token.INT, "10"},
{token.SEMICOLON, ";"},
{token.INT, "10"},
{token.NOT_EQ, "!="},
{token.INT, "9"},
{token.SEMICOLON, ";"},
{token.STRING, "foobar"},
{token.STRING, "foo bar"},
{token.LBRACKET, "["},
{token.INT, "1"},
{token.COMMA, ","},
{token.INT, "2"},
{token.RBRACKET, "]"},
{token.SEMICOLON, ";"},
{token.LBRACE, "{"},
{token.STRING, "foo"},
{token.COLON, ":"},
{token.STRING, "bar"},
{token.RBRACE, "}"},
{token.EOF, ""},
}
l := New(input)
for i, tt := range tests {
tok := l.NextToken()
if tok.Type != tt.expectedType {
t.Fatalf("tests[%d] - tokentype wrong. expected=%q, got=%q",
i, tt.expectedType, tok.Type)
}
if tok.Literal != tt.expectedLiteral {
t.Fatalf("tests[%d] - literal wrong. expected=%q, got=%q",
i, tt.expectedLiteral, tok.Literal)
}
}
}

View file

@ -0,0 +1,19 @@
package main
import (
"fmt"
"monkey/repl"
"os"
"os/user"
)
func main() {
user, err := user.Current()
if err != nil {
panic(err)
}
fmt.Printf("Hello %s! This is the Monkey programming language!\n",
user.Username)
fmt.Printf("Feel free to type in commands\n")
repl.Start(os.Stdin, os.Stdout)
}

View file

@ -0,0 +1,30 @@
package object
func NewEnclosedEnvironment(outer *Environment) *Environment {
env := NewEnvironment()
env.outer = outer
return env
}
func NewEnvironment() *Environment {
s := make(map[string]Object)
return &Environment{store: s, outer: nil}
}
type Environment struct {
store map[string]Object
outer *Environment
}
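// Get walks outward through enclosing environments, so names bound in an
// outer scope (including a closure's defining scope) stay visible from inner
// scopes unless they are shadowed locally.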
func (e *Environment) Get(name string) (Object, bool) {
obj, ok := e.store[name]
if !ok && e.outer != nil {
obj, ok = e.outer.Get(name)
}
return obj, ok
}
func (e *Environment) Set(name string, val Object) Object {
e.store[name] = val
return val
}

View file

@ -0,0 +1,182 @@
package object
import (
"bytes"
"fmt"
"hash/fnv"
"monkey/ast"
"strings"
)
type BuiltinFunction func(args ...Object) Object
type ObjectType string
const (
NULL_OBJ = "NULL"
ERROR_OBJ = "ERROR"
INTEGER_OBJ = "INTEGER"
BOOLEAN_OBJ = "BOOLEAN"
STRING_OBJ = "STRING"
RETURN_VALUE_OBJ = "RETURN_VALUE"
FUNCTION_OBJ = "FUNCTION"
BUILTIN_OBJ = "BUILTIN"
ARRAY_OBJ = "ARRAY"
HASH_OBJ = "HASH"
)
type HashKey struct {
Type ObjectType
Value uint64
}
type Hashable interface {
HashKey() HashKey
}
type Object interface {
Type() ObjectType
Inspect() string
}
type Integer struct {
Value int64
}
func (i *Integer) Type() ObjectType { return INTEGER_OBJ }
func (i *Integer) Inspect() string { return fmt.Sprintf("%d", i.Value) }
func (i *Integer) HashKey() HashKey {
return HashKey{Type: i.Type(), Value: uint64(i.Value)}
}
type Boolean struct {
Value bool
}
func (b *Boolean) Type() ObjectType { return BOOLEAN_OBJ }
func (b *Boolean) Inspect() string { return fmt.Sprintf("%t", b.Value) }
func (b *Boolean) HashKey() HashKey {
var value uint64
if b.Value {
value = 1
} else {
value = 0
}
return HashKey{Type: b.Type(), Value: value}
}
type Null struct{}
func (n *Null) Type() ObjectType { return NULL_OBJ }
func (n *Null) Inspect() string { return "null" }
type ReturnValue struct {
Value Object
}
func (rv *ReturnValue) Type() ObjectType { return RETURN_VALUE_OBJ }
func (rv *ReturnValue) Inspect() string { return rv.Value.Inspect() }
type Error struct {
Message string
}
func (e *Error) Type() ObjectType { return ERROR_OBJ }
func (e *Error) Inspect() string { return "ERROR: " + e.Message }
type Function struct {
Parameters []*ast.Identifier
Body *ast.BlockStatement
Env *Environment
}
func (f *Function) Type() ObjectType { return FUNCTION_OBJ }
func (f *Function) Inspect() string {
var out bytes.Buffer
params := []string{}
for _, p := range f.Parameters {
params = append(params, p.String())
}
out.WriteString("fn")
out.WriteString("(")
out.WriteString(strings.Join(params, ", "))
out.WriteString(") {\n")
out.WriteString(f.Body.String())
out.WriteString("\n}")
return out.String()
}
type String struct {
Value string
}
func (s *String) Type() ObjectType { return STRING_OBJ }
func (s *String) Inspect() string { return s.Value }
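// HashKey hashes the string's contents with 64-bit FNV-1a, so two *String
// objects with equal values produce the same HashKey and therefore address
// the same pair in an *object.Hash.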
func (s *String) HashKey() HashKey {
h := fnv.New64a()
h.Write([]byte(s.Value))
return HashKey{Type: s.Type(), Value: h.Sum64()}
}
type Builtin struct {
Fn BuiltinFunction
}
func (b *Builtin) Type() ObjectType { return BUILTIN_OBJ }
func (b *Builtin) Inspect() string { return "builtin function" }
type Array struct {
Elements []Object
}
func (ao *Array) Type() ObjectType { return ARRAY_OBJ }
func (ao *Array) Inspect() string {
var out bytes.Buffer
elements := []string{}
for _, e := range ao.Elements {
elements = append(elements, e.Inspect())
}
out.WriteString("[")
out.WriteString(strings.Join(elements, ", "))
out.WriteString("]")
return out.String()
}
type HashPair struct {
Key Object
Value Object
}
type Hash struct {
Pairs map[HashKey]HashPair
}
func (h *Hash) Type() ObjectType { return HASH_OBJ }
func (h *Hash) Inspect() string {
var out bytes.Buffer
pairs := []string{}
for _, pair := range h.Pairs {
pairs = append(pairs, fmt.Sprintf("%s: %s",
pair.Key.Inspect(), pair.Value.Inspect()))
}
out.WriteString("{")
out.WriteString(strings.Join(pairs, ", "))
out.WriteString("}")
return out.String()
}

View file

@ -0,0 +1,60 @@
package object
import "testing"
func TestStringHashKey(t *testing.T) {
hello1 := &String{Value: "Hello World"}
hello2 := &String{Value: "Hello World"}
diff1 := &String{Value: "My name is johnny"}
diff2 := &String{Value: "My name is johnny"}
if hello1.HashKey() != hello2.HashKey() {
t.Errorf("strings with same content have different hash keys")
}
if diff1.HashKey() != diff2.HashKey() {
t.Errorf("strings with same content have different hash keys")
}
if hello1.HashKey() == diff1.HashKey() {
t.Errorf("strings with different content have same hash keys")
}
}
func TestBooleanHashKey(t *testing.T) {
true1 := &Boolean{Value: true}
true2 := &Boolean{Value: true}
false1 := &Boolean{Value: false}
false2 := &Boolean{Value: false}
if true1.HashKey() != true2.HashKey() {
t.Errorf("trues do not have same hash key")
}
if false1.HashKey() != false2.HashKey() {
t.Errorf("falses do not have same hash key")
}
if true1.HashKey() == false1.HashKey() {
t.Errorf("true has same hash key as false")
}
}
func TestIntegerHashKey(t *testing.T) {
one1 := &Integer{Value: 1}
one2 := &Integer{Value: 1}
two1 := &Integer{Value: 2}
two2 := &Integer{Value: 2}
if one1.HashKey() != one2.HashKey() {
t.Errorf("integers with same content have twoerent hash keys")
}
if two1.HashKey() != two2.HashKey() {
t.Errorf("integers with same content have twoerent hash keys")
}
if one1.HashKey() == two1.HashKey() {
t.Errorf("integers with twoerent content have same hash keys")
}
}

View file

@ -0,0 +1,491 @@
package parser
import (
"fmt"
"monkey/ast"
"monkey/lexer"
"monkey/token"
"strconv"
)
const (
_ int = iota
LOWEST
EQUALS // ==
LESSGREATER // > or <
SUM // +
PRODUCT // *
PREFIX // -X or !X
CALL // myFunction(X)
INDEX // array[index]
)
var precedences = map[token.TokenType]int{
token.EQ: EQUALS,
token.NOT_EQ: EQUALS,
token.LT: LESSGREATER,
token.GT: LESSGREATER,
token.PLUS: SUM,
token.MINUS: SUM,
token.SLASH: PRODUCT,
token.ASTERISK: PRODUCT,
token.LPAREN: CALL,
token.LBRACKET: INDEX,
}
type (
prefixParseFn func() ast.Expression
infixParseFn func(ast.Expression) ast.Expression
)
type Parser struct {
l *lexer.Lexer
errors []string
curToken token.Token
peekToken token.Token
prefixParseFns map[token.TokenType]prefixParseFn
infixParseFns map[token.TokenType]infixParseFn
}
func New(l *lexer.Lexer) *Parser {
p := &Parser{
l: l,
errors: []string{},
}
p.prefixParseFns = make(map[token.TokenType]prefixParseFn)
p.registerPrefix(token.IDENT, p.parseIdentifier)
p.registerPrefix(token.INT, p.parseIntegerLiteral)
p.registerPrefix(token.STRING, p.parseStringLiteral)
p.registerPrefix(token.BANG, p.parsePrefixExpression)
p.registerPrefix(token.MINUS, p.parsePrefixExpression)
p.registerPrefix(token.TRUE, p.parseBoolean)
p.registerPrefix(token.FALSE, p.parseBoolean)
p.registerPrefix(token.LPAREN, p.parseGroupedExpression)
p.registerPrefix(token.IF, p.parseIfExpression)
p.registerPrefix(token.FUNCTION, p.parseFunctionLiteral)
p.registerPrefix(token.LBRACKET, p.parseArrayLiteral)
p.registerPrefix(token.LBRACE, p.parseHashLiteral)
p.infixParseFns = make(map[token.TokenType]infixParseFn)
p.registerInfix(token.PLUS, p.parseInfixExpression)
p.registerInfix(token.MINUS, p.parseInfixExpression)
p.registerInfix(token.SLASH, p.parseInfixExpression)
p.registerInfix(token.ASTERISK, p.parseInfixExpression)
p.registerInfix(token.EQ, p.parseInfixExpression)
p.registerInfix(token.NOT_EQ, p.parseInfixExpression)
p.registerInfix(token.LT, p.parseInfixExpression)
p.registerInfix(token.GT, p.parseInfixExpression)
p.registerInfix(token.LPAREN, p.parseCallExpression)
p.registerInfix(token.LBRACKET, p.parseIndexExpression)
// Read two tokens, so curToken and peekToken are both set
p.nextToken()
p.nextToken()
return p
}
func (p *Parser) nextToken() {
p.curToken = p.peekToken
p.peekToken = p.l.NextToken()
}
func (p *Parser) curTokenIs(t token.TokenType) bool {
return p.curToken.Type == t
}
func (p *Parser) peekTokenIs(t token.TokenType) bool {
return p.peekToken.Type == t
}
func (p *Parser) expectPeek(t token.TokenType) bool {
if p.peekTokenIs(t) {
p.nextToken()
return true
} else {
p.peekError(t)
return false
}
}
func (p *Parser) Errors() []string {
return p.errors
}
func (p *Parser) peekError(t token.TokenType) {
msg := fmt.Sprintf("expected next token to be %s, got %s instead",
t, p.peekToken.Type)
p.errors = append(p.errors, msg)
}
func (p *Parser) noPrefixParseFnError(t token.TokenType) {
msg := fmt.Sprintf("no prefix parse function for %s found", t)
p.errors = append(p.errors, msg)
}
func (p *Parser) ParseProgram() *ast.Program {
program := &ast.Program{}
program.Statements = []ast.Statement{}
for !p.curTokenIs(token.EOF) {
stmt := p.parseStatement()
if stmt != nil {
program.Statements = append(program.Statements, stmt)
}
p.nextToken()
}
return program
}
func (p *Parser) parseStatement() ast.Statement {
switch p.curToken.Type {
case token.LET:
return p.parseLetStatement()
case token.RETURN:
return p.parseReturnStatement()
default:
return p.parseExpressionStatement()
}
}
func (p *Parser) parseLetStatement() *ast.LetStatement {
stmt := &ast.LetStatement{Token: p.curToken}
if !p.expectPeek(token.IDENT) {
return nil
}
stmt.Name = &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
if !p.expectPeek(token.ASSIGN) {
return nil
}
p.nextToken()
stmt.Value = p.parseExpression(LOWEST)
if p.peekTokenIs(token.SEMICOLON) {
p.nextToken()
}
return stmt
}
func (p *Parser) parseReturnStatement() *ast.ReturnStatement {
stmt := &ast.ReturnStatement{Token: p.curToken}
p.nextToken()
stmt.ReturnValue = p.parseExpression(LOWEST)
if p.peekTokenIs(token.SEMICOLON) {
p.nextToken()
}
return stmt
}
func (p *Parser) parseExpressionStatement() *ast.ExpressionStatement {
stmt := &ast.ExpressionStatement{Token: p.curToken}
stmt.Expression = p.parseExpression(LOWEST)
if p.peekTokenIs(token.SEMICOLON) {
p.nextToken()
}
return stmt
}
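// parseExpression is the heart of the Pratt parser: it parses a prefix
// expression for the current token, then keeps folding in infix expressions
// for as long as the upcoming operator binds more tightly than the precedence
// handed in by the caller.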
func (p *Parser) parseExpression(precedence int) ast.Expression {
prefix := p.prefixParseFns[p.curToken.Type]
if prefix == nil {
p.noPrefixParseFnError(p.curToken.Type)
return nil
}
leftExp := prefix()
for !p.peekTokenIs(token.SEMICOLON) && precedence < p.peekPrecedence() {
infix := p.infixParseFns[p.peekToken.Type]
if infix == nil {
return leftExp
}
p.nextToken()
leftExp = infix(leftExp)
}
return leftExp
}
func (p *Parser) peekPrecedence() int {
if p, ok := precedences[p.peekToken.Type]; ok {
return p
}
return LOWEST
}
func (p *Parser) curPrecedence() int {
if p, ok := precedences[p.curToken.Type]; ok {
return p
}
return LOWEST
}
func (p *Parser) parseIdentifier() ast.Expression {
return &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
}
func (p *Parser) parseIntegerLiteral() ast.Expression {
lit := &ast.IntegerLiteral{Token: p.curToken}
value, err := strconv.ParseInt(p.curToken.Literal, 0, 64)
if err != nil {
msg := fmt.Sprintf("could not parse %q as integer", p.curToken.Literal)
p.errors = append(p.errors, msg)
return nil
}
lit.Value = value
return lit
}
func (p *Parser) parseStringLiteral() ast.Expression {
return &ast.StringLiteral{Token: p.curToken, Value: p.curToken.Literal}
}
func (p *Parser) parsePrefixExpression() ast.Expression {
expression := &ast.PrefixExpression{
Token: p.curToken,
Operator: p.curToken.Literal,
}
p.nextToken()
expression.Right = p.parseExpression(PREFIX)
return expression
}
func (p *Parser) parseInfixExpression(left ast.Expression) ast.Expression {
expression := &ast.InfixExpression{
Token: p.curToken,
Operator: p.curToken.Literal,
Left: left,
}
precedence := p.curPrecedence()
p.nextToken()
expression.Right = p.parseExpression(precedence)
return expression
}
func (p *Parser) parseBoolean() ast.Expression {
return &ast.Boolean{Token: p.curToken, Value: p.curTokenIs(token.TRUE)}
}
func (p *Parser) parseGroupedExpression() ast.Expression {
p.nextToken()
exp := p.parseExpression(LOWEST)
if !p.expectPeek(token.RPAREN) {
return nil
}
return exp
}
func (p *Parser) parseIfExpression() ast.Expression {
expression := &ast.IfExpression{Token: p.curToken}
if !p.expectPeek(token.LPAREN) {
return nil
}
p.nextToken()
expression.Condition = p.parseExpression(LOWEST)
if !p.expectPeek(token.RPAREN) {
return nil
}
if !p.expectPeek(token.LBRACE) {
return nil
}
expression.Consequence = p.parseBlockStatement()
if p.peekTokenIs(token.ELSE) {
p.nextToken()
if !p.expectPeek(token.LBRACE) {
return nil
}
expression.Alternative = p.parseBlockStatement()
}
return expression
}
func (p *Parser) parseBlockStatement() *ast.BlockStatement {
block := &ast.BlockStatement{Token: p.curToken}
block.Statements = []ast.Statement{}
p.nextToken()
for !p.curTokenIs(token.RBRACE) && !p.curTokenIs(token.EOF) {
stmt := p.parseStatement()
if stmt != nil {
block.Statements = append(block.Statements, stmt)
}
p.nextToken()
}
return block
}
func (p *Parser) parseFunctionLiteral() ast.Expression {
lit := &ast.FunctionLiteral{Token: p.curToken}
if !p.expectPeek(token.LPAREN) {
return nil
}
lit.Parameters = p.parseFunctionParameters()
if !p.expectPeek(token.LBRACE) {
return nil
}
lit.Body = p.parseBlockStatement()
return lit
}
func (p *Parser) parseFunctionParameters() []*ast.Identifier {
identifiers := []*ast.Identifier{}
if p.peekTokenIs(token.RPAREN) {
p.nextToken()
return identifiers
}
p.nextToken()
ident := &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
identifiers = append(identifiers, ident)
for p.peekTokenIs(token.COMMA) {
p.nextToken()
p.nextToken()
ident := &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
identifiers = append(identifiers, ident)
}
if !p.expectPeek(token.RPAREN) {
return nil
}
return identifiers
}
func (p *Parser) parseCallExpression(function ast.Expression) ast.Expression {
exp := &ast.CallExpression{Token: p.curToken, Function: function}
exp.Arguments = p.parseExpressionList(token.RPAREN)
return exp
}
func (p *Parser) parseExpressionList(end token.TokenType) []ast.Expression {
list := []ast.Expression{}
if p.peekTokenIs(end) {
p.nextToken()
return list
}
p.nextToken()
list = append(list, p.parseExpression(LOWEST))
for p.peekTokenIs(token.COMMA) {
p.nextToken()
p.nextToken()
list = append(list, p.parseExpression(LOWEST))
}
if !p.expectPeek(end) {
return nil
}
return list
}
func (p *Parser) parseArrayLiteral() ast.Expression {
array := &ast.ArrayLiteral{Token: p.curToken}
array.Elements = p.parseExpressionList(token.RBRACKET)
return array
}
func (p *Parser) parseIndexExpression(left ast.Expression) ast.Expression {
exp := &ast.IndexExpression{Token: p.curToken, Left: left}
p.nextToken()
exp.Index = p.parseExpression(LOWEST)
if !p.expectPeek(token.RBRACKET) {
return nil
}
return exp
}
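// parseHashLiteral accepts any expression as a key; whether a key is actually
// hashable is only checked later, in the evaluator.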
func (p *Parser) parseHashLiteral() ast.Expression {
hash := &ast.HashLiteral{Token: p.curToken}
hash.Pairs = make(map[ast.Expression]ast.Expression)
for !p.peekTokenIs(token.RBRACE) {
p.nextToken()
key := p.parseExpression(LOWEST)
if !p.expectPeek(token.COLON) {
return nil
}
p.nextToken()
value := p.parseExpression(LOWEST)
hash.Pairs[key] = value
if !p.peekTokenIs(token.RBRACE) && !p.expectPeek(token.COMMA) {
return nil
}
}
if !p.expectPeek(token.RBRACE) {
return nil
}
return hash
}
func (p *Parser) registerPrefix(tokenType token.TokenType, fn prefixParseFn) {
p.prefixParseFns[tokenType] = fn
}
func (p *Parser) registerInfix(tokenType token.TokenType, fn infixParseFn) {
p.infixParseFns[tokenType] = fn
}

File diff suppressed because it is too large

View file

@ -0,0 +1,32 @@
package parser
import (
"fmt"
"strings"
)
var traceLevel int = 0
const traceIdentPlaceholder string = "\t"
func identLevel() string {
return strings.Repeat(traceIdentPlaceholder, traceLevel-1)
}
func tracePrint(fs string) {
fmt.Printf("%s%s\n", identLevel(), fs)
}
func incIdent() { traceLevel = traceLevel + 1 }
func decIdent() { traceLevel = traceLevel - 1 }
func trace(msg string) string {
incIdent()
tracePrint("BEGIN " + msg)
return msg
}
func untrace(msg string) {
tracePrint("END " + msg)
decIdent()
}

View file

@ -0,0 +1,64 @@
package repl
import (
"bufio"
"fmt"
"io"
"monkey/evaluator"
"monkey/lexer"
"monkey/object"
"monkey/parser"
)
const PROMPT = ">> "
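// Start evaluates each input line against a single shared environment, so
// bindings created on earlier lines remain visible on later ones.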
func Start(in io.Reader, out io.Writer) {
scanner := bufio.NewScanner(in)
env := object.NewEnvironment()
for {
fmt.Fprintf(out, PROMPT)
scanned := scanner.Scan()
if !scanned {
return
}
line := scanner.Text()
l := lexer.New(line)
p := parser.New(l)
program := p.ParseProgram()
if len(p.Errors()) != 0 {
printParserErrors(out, p.Errors())
continue
}
evaluated := evaluator.Eval(program, env)
if evaluated != nil {
io.WriteString(out, evaluated.Inspect())
io.WriteString(out, "\n")
}
}
}
const MONKEY_FACE = `            __,__
   .--.  .-"     "-.  .--.
  / .. \/  .-. .-.  \/ .. \
 | |  '|  /   Y   \  |'  | |
 | \   \  \ 0 | 0 /  /   / |
  \ '- ,\.-"""""""-./, -' /
   ''-' /_   ^ ^   _\ '-''
       |  \._   _./  |
       \   \ '~' /   /
        '._ '-=-' _.'
           '-----'
`
func printParserErrors(out io.Writer, errors []string) {
io.WriteString(out, MONKEY_FACE)
io.WriteString(out, "Woops! We ran into some monkey business here!\n")
io.WriteString(out, " parser errors:\n")
for _, msg := range errors {
io.WriteString(out, "\t"+msg+"\n")
}
}

View file

@ -0,0 +1,70 @@
package token
type TokenType string
const (
ILLEGAL = "ILLEGAL"
EOF = "EOF"
// Identifiers + literals
IDENT = "IDENT" // add, foobar, x, y, ...
INT = "INT" // 1343456
STRING = "STRING" // "foobar"
// Operators
ASSIGN = "="
PLUS = "+"
MINUS = "-"
BANG = "!"
ASTERISK = "*"
SLASH = "/"
LT = "<"
GT = ">"
EQ = "=="
NOT_EQ = "!="
// Delimiters
COMMA = ","
SEMICOLON = ";"
COLON = ":"
LPAREN = "("
RPAREN = ")"
LBRACE = "{"
RBRACE = "}"
LBRACKET = "["
RBRACKET = "]"
// Keywords
FUNCTION = "FUNCTION"
LET = "LET"
TRUE = "TRUE"
FALSE = "FALSE"
IF = "IF"
ELSE = "ELSE"
RETURN = "RETURN"
)
type Token struct {
Type TokenType
Literal string
}
var keywords = map[string]TokenType{
"fn": FUNCTION,
"let": LET,
"true": TRUE,
"false": FALSE,
"if": IF,
"else": ELSE,
"return": RETURN,
}
func LookupIdent(ident string) TokenType {
if tok, ok := keywords[ident]; ok {
return tok
}
return IDENT
}

24
waiig_code_1.7/LICENSE Normal file
View file

@ -0,0 +1,24 @@
------------------------------------------------------------------------------
This folder (called the `code` folder) and its content, accompanying "Writing An
Interpreter In Go", are licensed under the MIT License (MIT), which follows.
------------------------------------------------------------------------------
Copyright (c) 2016-2017 Thorsten Ball
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

29
waiig_code_1.7/README.md Normal file
View file

@ -0,0 +1,29 @@
# Writing An Interpreter In Go
Thank you for purchasing "Writing An Interpreter In Go"!
In the `code` folder you'll find the final result of each chapter. The code
should be compilable and runnable if you set your $GOPATH to the top-level
folder for each chapter. Every chapter folder also contains an `.envrc` file. If
you have direnv (http://direnv.net/) installed then the $GOPATH should be
automatically set for you as soon as you `cd` into the directories.
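For example, a minimal session could look like this (a sketch assuming a POSIX
shell, a Go toolchain on your PATH, and a chapter folder that contains a
`main.go`):

    export GOPATH=$(pwd)   # or let direnv pick it up from the .envrc
    go test ./...          # run that chapter's test suite
    go run main.go         # start the Monkey REPL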
If you have any questions, send me an email: me@thorstenball.com
Enjoy the book!
Thorsten
--
Copyright © 2016-2020 Thorsten Ball
All rights reserved.
"Writing An Interpreter In Go" is copyright Thorsten Ball.
No part of this publication may be reproduced, stored in a retrieval system, or
transmitted, in any form, or by any means, electronic, mechanical, photocopying,
recording, or otherwise, without the prior consent of the publisher.
EXCEPT: the contents of the `code` folder are licensed under the MIT license
(https://opensource.org/licenses/MIT). See the `LICENSE` file in the `code`
folder.