Skip to content

Commit

Permalink
add e notation for number
Browse files Browse the repository at this point in the history
  • Loading branch information
gravataLonga committed Aug 1, 2022
1 parent df2db13 commit 343f184
Show file tree
Hide file tree
Showing 2 changed files with 116 additions and 18 deletions.
25 changes: 24 additions & 1 deletion lexer/lexer.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"encoding/hex"
"github.com/gravataLonga/ninja/token"
"io"
"strconv"
"strings"
)

Expand Down Expand Up @@ -200,7 +201,7 @@ func (l *Lexer) readIdentifier() []byte {
// readDigit read integer and floats
func (l *Lexer) readDigit() []byte {
position := l.position
for isDigit(l.ch) || (l.ch == '.' && isDigit(l.peekChar())) {
for isDigit(l.ch) || (l.ch == '.' && isDigit(l.peekChar())) || (l.ch == 'e' && isDigit(l.peekChar())) {
l.readChar()
}
return []byte(l.input[position:l.position])
Expand Down Expand Up @@ -253,8 +254,30 @@ func (l *Lexer) readString() (string, error) {
b.WriteByte('\r')
case 't':
b.WriteByte('\t')
case 'b':
b.WriteByte('\b')
case 'f':
b.WriteByte('\f')
case '\\':
b.WriteByte('\\')
case '/':
b.WriteByte('/')
case 'u':
// Skip over the the '\\', 'u' and the next four bytes (unicode)
chars := []byte{}
l.readChar()
for n := 0; n <= 3; n++ {
chars = append(chars, l.ch)
l.readChar()
}
chars = append(chars, l.ch)
src := string(chars)
dst, err := strconv.Unquote(`"\` + src + `"`)
if err != nil {
return "", err
}
b.WriteString(dst)
continue
case 'x':
// Skip over the the '\\', 'x' and the next two bytes (hex)
l.readChar()
Expand Down
109 changes: 92 additions & 17 deletions lexer/lexer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -187,28 +187,103 @@ func TestStringAcceptUtf8Character(t *testing.T) {
}

// TestLexerReadString verifies that the lexer decodes escape sequences
// inside string literals: escaped quotes, hex escapes (\xNN), control
// characters (\r \n \t \b \f), backslash, forward slash, and four-digit
// unicode escapes (\uNNNN).
func TestLexerReadString(t *testing.T) {
	tests := []struct {
		input    string
		expected string
	}{
		{
			// Escaped double quotes inside a string literal.
			`"\"foo\""`,
			`"foo"`,
		},
		{
			// Hex escapes: NUL, newline, DEL.
			`"\x00\x0a\x7f"`,
			"\x00\n\u007f",
		},
		{
			// Control-character escapes.
			`"\r\n\t\b\f"`,
			"\r\n\t\b\f",
		},
		{
			`"\""`,
			"\"",
		},
		{
			`"\\"`,
			"\\",
		},
		{
			// JSON-style escaped solidus decodes to a plain slash.
			`"\/"`,
			"/",
		},
		{
			// \uNNNN unicode escapes spelling "ninja".
			`"\u006E\u0069\u006E\u006A\u0061"`,
			"ninja",
		},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("TestLexerReadString[%d]", i), func(t *testing.T) {
			lexer := New(strings.NewReader(tt.input))
			tok := lexer.NextToken()

			if tok.Type != token.STRING {
				t.Fatalf("token type wrong. expected=%q, got=%q", token.STRING, tok.Type)
			}

			if tok.Literal != tt.expected {
				t.Fatalf("literal wrong. expected=%q, got=%q", tt.expected, tok.Literal)
			}
		})
	}
}

func TestLexerReadNumber(t *testing.T) {
tests := []struct {
expectedType token.TokenType
expectedLiteral string
input string
expected string
expectedTokenType token.TokenType
}{
{token.STRING, "\"foo\""},
{token.SEMICOLON, ";"},
{token.STRING, "\x00\n\u007f"},
{token.SEMICOLON, ";"},
{token.STRING, "\r\n\t"},
{
`1`,
`1`,
token.INT,
},
{
`1234`,
`1234`,
token.INT,
},
{
`0`,
`0`,
token.INT,
},
{
`0.0`,
`0.0`,
token.FLOAT,
},
{
`1e3`,
`1e3`,
token.INT,
},
}
lexer := New(strings.NewReader(input))

for _, test := range tests {
tok := lexer.NextToken()
if tok.Type != test.expectedType {
t.Fatalf("token type wrong. expected=%q, got=%q", test.expectedType, tok.Type)
}
for i, tt := range tests {
t.Run(fmt.Sprintf("TestLexerReadNumber[%d]", i), func(t *testing.T) {
lexer := New(strings.NewReader(tt.input))
tok := lexer.NextToken()

if tok.Type != tt.expectedTokenType {
t.Fatalf("token type wrong. expected=%q, got=%q", token.INT, tok.Type)
}

if tok.Literal != tt.expected {
t.Fatalf("literal wrong. expected=%q, got=%q", tt.expected, tok.Literal)
}
})

if string(tok.Literal) != test.expectedLiteral {
t.Fatalf("literal wrong. expected=%q, got=%q", test.expectedLiteral, tok.Literal)
}
}
}

0 comments on commit 343f184

Please sign in to comment.