Parse call expressions and get rid of the TODOs in parseLetStatement and parseReturnStatement.
Signed-off-by: jmug <u.g.a.mariano@gmail.com>
Parent: 985cf24fbc
Commit: 44f936affb
4 changed files with 165 additions and 46 deletions
pkg/ast/call.go (new file, 31 lines)

@@ -0,0 +1,31 @@
+package ast
+
+import (
+	"bytes"
+	"strings"
+
+	"code.jmug.me/jmug/interpreter-in-go/pkg/token"
+)
+
+type CallExpression struct {
+	Token     token.Token // The ( token
+	Function  Expression
+	Arguments []Expression
+}
+
+func (ce *CallExpression) expressionNode() {}
+func (ce *CallExpression) TokenLiteral() string {
+	return ce.Token.Literal
+}
+func (ce *CallExpression) String() string {
+	var out bytes.Buffer
+	out.WriteString(ce.Function.String())
+	out.WriteString("(")
+	args := []string{}
+	for _, arg := range ce.Arguments {
+		args = append(args, arg.String())
+	}
+	out.WriteString(strings.Join(args, ", "))
+	out.WriteString(")")
+	return out.String()
+}
@@ -49,6 +49,7 @@ func New(l *lexer.Lexer) *Parser {
 	p.registerInfix(token.LT, p.parseInfixExpression)
 	p.registerInfix(token.EQ, p.parseInfixExpression)
 	p.registerInfix(token.NOT_EQ, p.parseInfixExpression)
+	p.registerInfix(token.LPAREN, p.parseCallExpression)
 	// TODO: figure out why this can't be done from `parseProgram`
 	p.nextToken()
 	p.nextToken()
@@ -90,7 +91,7 @@ func (p *Parser) parseBlockStatement() *ast.BlockStatement {
 		if stmt != nil {
 			block.Statements = append(block.Statements, stmt)
 		}
-		// Consume the semicolon.
+		// Consume the last token in the statement.
 		p.nextToken()
 	}
 	return block
@@ -105,8 +106,10 @@ func (p *Parser) parseLetStatement() ast.Statement {
 	if !p.nextTokenIfPeekIs(token.ASSIGN) {
 		return nil
 	}
-	// TODO: Skipping until we find the semicolon to avoid parsing the expression.
-	for !p.curTokenIs(token.SEMICOLON) {
+	// Consume the assign.
+	p.nextToken()
+	stmt.Value = p.parseExpression(LOWEST)
+	if p.peekTokenIs(token.SEMICOLON) {
 		p.nextToken()
 	}
 	return stmt
@@ -115,8 +118,8 @@ func (p *Parser) parseLetStatement() ast.Statement {
 func (p *Parser) parseReturnStatement() ast.Statement {
 	stmt := &ast.ReturnStatement{Token: p.curToken}
 	p.nextToken()
-	// TODO: Skipping until we find the semicolon to avoid parsing the expression.
-	for !p.curTokenIs(token.SEMICOLON) {
+	stmt.ReturnValue = p.parseExpression(LOWEST)
+	if p.peekTokenIs(token.SEMICOLON) {
 		p.nextToken()
 	}
 	return stmt
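
With the skip-to-semicolon loops gone, let and return statements now carry real parsed expressions in Value and ReturnValue. A minimal sketch of the effect, assuming the pkg/lexer and pkg/parser import paths and an exported parser.New; only the pkg/ast and pkg/token paths appear verbatim in this commit:

package main

import (
	"fmt"

	// These import paths are assumptions based on the pkg/ layout above.
	"code.jmug.me/jmug/interpreter-in-go/pkg/ast"
	"code.jmug.me/jmug/interpreter-in-go/pkg/lexer"
	"code.jmug.me/jmug/interpreter-in-go/pkg/parser"
)

func main() {
	l := lexer.New("let x = 5 * 5; return x + 1;")
	p := parser.New(l)
	program := p.ParseProgram()

	let := program.Statements[0].(*ast.LetStatement)
	ret := program.Statements[1].(*ast.ReturnStatement)

	// Before this commit both values were skipped; now they are expressions.
	fmt.Println(let.Value.String())       // (5 * 5)
	fmt.Println(ret.ReturnValue.String()) // (x + 1)
}
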
@@ -272,6 +275,35 @@ func (p *Parser) parseFunctionParameters() []*ast.Identifier {
 	return params
 }
 
+func (p *Parser) parseCallExpression(function ast.Expression) ast.Expression {
+	call := &ast.CallExpression{Token: p.curToken, Function: function}
+	call.Arguments = p.parseCallArguments()
+	return call
+}
+
+func (p *Parser) parseCallArguments() []ast.Expression {
+	args := []ast.Expression{}
+	if p.peekTokenIs(token.RPAREN) {
+		p.nextToken()
+		return args
+	}
+	// Consume the LPAREN
+	p.nextToken()
+	args = append(args, p.parseExpression(LOWEST))
+	for p.peekTokenIs(token.COMMA) {
+		// Consume last token of the previous expression.
+		p.nextToken()
+		// Consume the comma.
+		p.nextToken()
+		args = append(args, p.parseExpression(LOWEST))
+	}
+	if !p.nextTokenIfPeekIs(token.RPAREN) {
+		// TODO: Would be good to emit an error here.
+		return nil
+	}
+	return args
+}
+
 func (p *Parser) curTokenIs(typ token.TokenType) bool {
 	return p.curToken.Type == typ
 }
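
A similar sketch for the new call-expression path, under the same assumptions about the pkg/lexer and pkg/parser import paths; the expected String() output matches the parenthesised form asserted in TestOperatorPrecedenceParsing below:

package main

import (
	"fmt"

	// Import paths for lexer and parser are assumed from the pkg/ layout;
	// only pkg/ast and pkg/token appear verbatim in this commit.
	"code.jmug.me/jmug/interpreter-in-go/pkg/ast"
	"code.jmug.me/jmug/interpreter-in-go/pkg/lexer"
	"code.jmug.me/jmug/interpreter-in-go/pkg/parser"
)

func main() {
	// Same input as TestCallExpressionParsing below.
	l := lexer.New("add(1, 2 * 3, 4 + 5);")
	p := parser.New(l)
	program := p.ParseProgram()

	stmt := program.Statements[0].(*ast.ExpressionStatement)
	call := stmt.Expression.(*ast.CallExpression)

	fmt.Println(call.Function.String()) // add
	fmt.Println(len(call.Arguments))    // 3
	fmt.Println(call.String())          // add(1, (2 * 3), (4 + 5))
}
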

@@ -9,37 +9,36 @@ import (
 )
 
 func TestLetStatements(t *testing.T) {
-	input := `
-let x = 5;
-let y = 10;
-let foobar = 838383;
-`
-	l := lexer.New(input)
-	p := New(l)
-
-	program := p.ParseProgram()
-	checkParserErrors(t, p)
-	if program == nil {
-		t.Fatalf("ParseProgram() returned nil")
-	}
-	if len(program.Statements) != 3 {
-		t.Fatalf("program.Statements does not contain 3 statements. got=%d",
-			len(program.Statements))
-	}
-
 	tests := []struct {
+		input string
 		expectedIdentifier string
+		expectedValue any
 	}{
-		{"x"},
-		{"y"},
-		{"foobar"},
+		{"let x = 5;", "x", 5},
+		{"let y = true;", "y", true},
+		{"let foobar = y;", "foobar", "y"},
 	}
 
-	for i, tt := range tests {
-		stmt := program.Statements[i]
+	for _, tt := range tests {
+		l := lexer.New(tt.input)
+		p := New(l)
+		program := p.ParseProgram()
+		checkParserErrors(t, p)
+
+		if len(program.Statements) != 1 {
+			t.Fatalf("program.Statements does not contain 1 statements. got=%d",
+				len(program.Statements))
+		}
+
+		stmt := program.Statements[0]
 		if !testLetStatement(t, stmt, tt.expectedIdentifier) {
 			return
 		}
+
+		val := stmt.(*ast.LetStatement).Value
+		if !testLiteralExpression(t, val, tt.expectedValue) {
+			return
+		}
 	}
 }

@@ -70,32 +69,38 @@ func testLetStatement(t *testing.T, s ast.Statement, name string) bool {
 }
 
 func TestReturnStatements(t *testing.T) {
-	input := `
-return 5;
-return 10;
-return 993322;
-`
-	l := lexer.New(input)
-	p := New(l)
-
-	program := p.ParseProgram()
-	checkParserErrors(t, p)
-
-	if len(program.Statements) != 3 {
-		t.Fatalf("program.Statements does not contain 3 statements. got=%d",
-			len(program.Statements))
+	tests := []struct {
+		input string
+		expectedValue interface{}
+	}{
+		{"return 5;", 5},
+		{"return true;", true},
+		{"return foobar;", "foobar"},
 	}
 
-	for _, stmt := range program.Statements {
+	for _, tt := range tests {
+		l := lexer.New(tt.input)
+		p := New(l)
+		program := p.ParseProgram()
+		checkParserErrors(t, p)
+
+		if len(program.Statements) != 1 {
+			t.Fatalf("program.Statements does not contain 1 statements. got=%d",
+				len(program.Statements))
+		}
+
+		stmt := program.Statements[0]
 		returnStmt, ok := stmt.(*ast.ReturnStatement)
 		if !ok {
-			t.Errorf("stmt not *ast.ReturnStatement. got=%T", stmt)
-			continue
+			t.Fatalf("stmt not *ast.ReturnStatement. got=%T", stmt)
 		}
 		if returnStmt.TokenLiteral() != "return" {
-			t.Errorf("returnStmt.TokenLiteral not 'return', got %q",
+			t.Fatalf("returnStmt.TokenLiteral not 'return', got %q",
 				returnStmt.TokenLiteral())
 		}
+		if !testLiteralExpression(t, returnStmt.ReturnValue, tt.expectedValue) {
+			return
+		}
 	}
 }

@@ -336,6 +341,18 @@ func TestOperatorPrecedenceParsing(t *testing.T) {
 			"!(true == true)",
 			"(!(true == true))",
 		},
+		{
+			"a + add(b * c) + d",
+			"((a + add((b * c))) + d)",
+		},
+		{
+			"add(a, b, 1, 2 * 3, 4 + 5, add(6, 7 * 8))",
+			"add(a, b, 1, (2 * 3), (4 + 5), add(6, (7 * 8)))",
+		},
+		{
+			"add(a + b + c * d / f + g)",
+			"add((((a + b) + ((c * d) / f)) + g))",
+		},
 	}
 
 	for _, tt := range tests {

@@ -573,6 +590,44 @@ func TestFunctionParameterParsing(t *testing.T) {
 	}
 }
 
+func TestCallExpressionParsing(t *testing.T) {
+	input := "add(1, 2 * 3, 4 + 5);"
+
+	l := lexer.New(input)
+	p := New(l)
+	program := p.ParseProgram()
+	checkParserErrors(t, p)
+
+	if len(program.Statements) != 1 {
+		t.Fatalf("program.Statements does not contain %d statements. got=%d\n",
+			1, len(program.Statements))
+	}
+
+	stmt, ok := program.Statements[0].(*ast.ExpressionStatement)
+	if !ok {
+		t.Fatalf("stmt is not ast.ExpressionStatement. got=%T",
+			program.Statements[0])
+	}
+
+	exp, ok := stmt.Expression.(*ast.CallExpression)
+	if !ok {
+		t.Fatalf("stmt.Expression is not ast.CallExpression. got=%T",
+			stmt.Expression)
+	}
+
+	if !testIdentifier(t, exp.Function, "add") {
+		return
+	}
+
+	if len(exp.Arguments) != 3 {
+		t.Fatalf("wrong length of arguments. got=%d", len(exp.Arguments))
+	}
+
+	testLiteralExpression(t, exp.Arguments[0], 1)
+	testInfixExpression(t, exp.Arguments[1], 2, "*", 3)
+	testInfixExpression(t, exp.Arguments[2], 4, "+", 5)
+}
+
 func testIdentifier(t *testing.T, exp ast.Expression, value string) bool {
 	ident, ok := exp.(*ast.Identifier)
 	if !ok {

@@ -24,6 +24,7 @@ var precedences = map[token.TokenType]int{
 	token.MINUS:    SUM,
 	token.ASTERISK: PRODUCT,
 	token.SLASH:    PRODUCT,
+	token.LPAREN:   CALL,
 }
 
 func (p *Parser) peekPrecedence() int {