Done with chapter 3.
This commit is contained in:
parent
6f03983721
commit
725753866a
17 changed files with 1264 additions and 0 deletions
10
Makefile
Normal file
10
Makefile
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
.PHONY: compile
|
||||
compile:
|
||||
protoc api/v1/*.proto \
|
||||
--go_out=. \
|
||||
--go_opt=paths=source_relative \
|
||||
--proto_path=.
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
go test -race ./...
|
||||
152
api/v1/log.pb.go
Normal file
152
api/v1/log.pb.go
Normal file
|
|
@ -0,0 +1,152 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc v3.19.2
|
||||
// source: api/v1/log.proto
|
||||
|
||||
package log_v1
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Record struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Offset uint64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Record) Reset() {
|
||||
*x = Record{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_v1_log_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Record) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Record) ProtoMessage() {}
|
||||
|
||||
func (x *Record) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_v1_log_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Record.ProtoReflect.Descriptor instead.
|
||||
func (*Record) Descriptor() ([]byte, []int) {
|
||||
return file_api_v1_log_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Record) GetValue() []byte {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Record) GetOffset() uint64 {
|
||||
if x != nil {
|
||||
return x.Offset
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_api_v1_log_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_api_v1_log_proto_rawDesc = []byte{
|
||||
0x0a, 0x10, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x12, 0x06, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x22, 0x36, 0x0a, 0x06, 0x52, 0x65,
|
||||
0x63, 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66,
|
||||
0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73,
|
||||
0x65, 0x74, 0x42, 0x1f, 0x5a, 0x1d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x41, 0x59, 0x4d, 0x31, 0x36, 0x30, 0x37, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x6f, 0x67,
|
||||
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_api_v1_log_proto_rawDescOnce sync.Once
|
||||
file_api_v1_log_proto_rawDescData = file_api_v1_log_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_api_v1_log_proto_rawDescGZIP() []byte {
|
||||
file_api_v1_log_proto_rawDescOnce.Do(func() {
|
||||
file_api_v1_log_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_v1_log_proto_rawDescData)
|
||||
})
|
||||
return file_api_v1_log_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_api_v1_log_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_api_v1_log_proto_goTypes = []interface{}{
|
||||
(*Record)(nil), // 0: log.v1.Record
|
||||
}
|
||||
var file_api_v1_log_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_api_v1_log_proto_init() }
|
||||
func file_api_v1_log_proto_init() {
|
||||
if File_api_v1_log_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_api_v1_log_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Record); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_api_v1_log_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_api_v1_log_proto_goTypes,
|
||||
DependencyIndexes: file_api_v1_log_proto_depIdxs,
|
||||
MessageInfos: file_api_v1_log_proto_msgTypes,
|
||||
}.Build()
|
||||
File_api_v1_log_proto = out.File
|
||||
file_api_v1_log_proto_rawDesc = nil
|
||||
file_api_v1_log_proto_goTypes = nil
|
||||
file_api_v1_log_proto_depIdxs = nil
|
||||
}
|
||||
10
api/v1/log.proto
Normal file
10
api/v1/log.proto
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package log.v1;
|
||||
|
||||
option go_package = "github.com/AYM1607/api/log_v1";
|
||||
|
||||
message Record {
|
||||
bytes value = 1;
|
||||
uint64 offset = 2;
|
||||
}
|
||||
16
go.mod
Normal file
16
go.mod
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
module github.com/AYM1607/proglog
|
||||
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/tysonmote/gommap v0.0.1
|
||||
google.golang.org/protobuf v1.27.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
|
||||
)
|
||||
31
go.sum
Normal file
31
go.sum
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tysonmote/gommap v0.0.1 h1:62U1lazHjXy0mm40WuTeoANPKZYSxl/vbElcb2i8hTc=
|
||||
github.com/tysonmote/gommap v0.0.1/go.mod h1:zZKhSp7mLDDzdl8MHbaDEJ3PH9VibPlFXV1t+4wmC00=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54=
|
||||
launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM=
|
||||
9
internal/log/config.go
Normal file
9
internal/log/config.go
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
package log
|
||||
|
||||
type Config struct {
|
||||
Segment struct {
|
||||
MaxStoreBytes uint64
|
||||
MaxIndexBytes uint64
|
||||
InitialOffset uint64
|
||||
}
|
||||
}
|
||||
102
internal/log/index.go
Normal file
102
internal/log/index.go
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/tysonmote/gommap"
|
||||
)
|
||||
|
||||
var (
|
||||
offWidth uint64 = 4
|
||||
posWidth uint64 = 8
|
||||
entWidth = offWidth + posWidth
|
||||
)
|
||||
|
||||
type index struct {
|
||||
file *os.File
|
||||
mmap gommap.MMap
|
||||
size uint64
|
||||
}
|
||||
|
||||
func newIndex(f *os.File, c Config) (*index, error) {
|
||||
idx := &index{
|
||||
file: f,
|
||||
}
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
idx.size = uint64(fi.Size())
|
||||
|
||||
// File needs to be expanded to its max size as re-sizing is not possible after memory map.
|
||||
err = os.Truncate(
|
||||
f.Name(),
|
||||
int64(c.Segment.MaxIndexBytes),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if idx.mmap, err = gommap.Map(
|
||||
idx.file.Fd(),
|
||||
gommap.PROT_READ|gommap.PROT_WRITE,
|
||||
gommap.MAP_SHARED,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return idx, nil
|
||||
}
|
||||
|
||||
// Read takes a relative offset and returns the offset and the position of tha offset
|
||||
// in the store.
|
||||
// Information of the last record is returned if `in` is -1
|
||||
func (i *index) Read(in int64) (off uint32, pos uint64, err error) {
|
||||
var out uint32
|
||||
if i.size == 0 {
|
||||
return 0, 0, io.EOF
|
||||
}
|
||||
if in == -1 {
|
||||
out = uint32((i.size / entWidth) - 1)
|
||||
} else {
|
||||
out = uint32(in)
|
||||
}
|
||||
idxPos := uint64(out) * entWidth
|
||||
|
||||
if idxPos >= i.size {
|
||||
return 0, 0, io.EOF
|
||||
}
|
||||
off = enc.Uint32(i.mmap[idxPos : idxPos+offWidth])
|
||||
pos = enc.Uint64(i.mmap[idxPos+offWidth : idxPos+entWidth])
|
||||
return off, pos, nil
|
||||
}
|
||||
|
||||
func (i *index) Write(off uint32, pos uint64) error {
|
||||
if i.size+entWidth > uint64(len(i.mmap)) {
|
||||
return io.EOF
|
||||
}
|
||||
idxPos := i.size
|
||||
enc.PutUint32(i.mmap[idxPos:idxPos+offWidth], off)
|
||||
enc.PutUint64(i.mmap[idxPos+offWidth:idxPos+entWidth], pos)
|
||||
i.size += entWidth
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *index) Name() string {
|
||||
return i.file.Name()
|
||||
}
|
||||
|
||||
func (i *index) Close() error {
|
||||
if err := i.mmap.Sync(gommap.MS_SYNC); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := i.file.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Truncate back to real file size after memory mapped data is synced.
|
||||
if err := i.file.Truncate(int64(i.size)); err != nil {
|
||||
return err
|
||||
}
|
||||
return i.file.Close()
|
||||
}
|
||||
59
internal/log/index_test.go
Normal file
59
internal/log/index_test.go
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TODO: Missing test for out of bounds writes.
|
||||
// TODO: Improve creation and usage of test data set (entries).
|
||||
func TestIndex(t *testing.T) {
|
||||
f, err := ioutil.TempFile("", "index_test")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
c := Config{}
|
||||
c.Segment.MaxIndexBytes = 1024
|
||||
|
||||
idx, err := newIndex(f, c)
|
||||
require.NoError(t, err)
|
||||
_, _, err = idx.Read(-1)
|
||||
require.Error(t, err, "Index read fails if empty")
|
||||
|
||||
entries := []struct {
|
||||
Off uint32
|
||||
Pos uint64
|
||||
}{
|
||||
{Off: 0, Pos: 0},
|
||||
{Off: 1, Pos: 10},
|
||||
}
|
||||
|
||||
for _, want := range entries {
|
||||
err = idx.Write(want.Off, want.Pos)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, pos, err := idx.Read(int64(want.Off))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want.Pos, pos)
|
||||
}
|
||||
|
||||
_, _, err = idx.Read(
|
||||
int64(len(entries)),
|
||||
)
|
||||
require.Equal(t, io.EOF, err, "Read fails when requesting out of bounds offset")
|
||||
|
||||
err = idx.Close()
|
||||
require.NoError(t, err, "Closes successfully")
|
||||
|
||||
f, _ = os.OpenFile(f.Name(), os.O_RDWR, 0600)
|
||||
idx, err = newIndex(f, c)
|
||||
require.NoError(t, err, "Opens from a pre-existing index file")
|
||||
off, pos, err := idx.Read(-1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint32(1), off)
|
||||
require.Equal(t, entries[1].Pos, pos)
|
||||
}
|
||||
203
internal/log/log.go
Normal file
203
internal/log/log.go
Normal file
|
|
@ -0,0 +1,203 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
api "github.com/AYM1607/proglog/api/v1"
|
||||
)
|
||||
|
||||
type Log struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
Dir string
|
||||
Config Config
|
||||
|
||||
activeSegment *segment
|
||||
segments []*segment
|
||||
}
|
||||
|
||||
func NewLog(dir string, c Config) (*Log, error) {
|
||||
if c.Segment.MaxIndexBytes == 0 {
|
||||
c.Segment.MaxIndexBytes = 1024
|
||||
}
|
||||
if c.Segment.MaxStoreBytes == 0 {
|
||||
c.Segment.MaxStoreBytes = 1024
|
||||
}
|
||||
l := &Log{
|
||||
Dir: dir,
|
||||
Config: c,
|
||||
}
|
||||
return l, l.setup()
|
||||
}
|
||||
|
||||
// setup hidrates the log with segments that already exist in the directory.
|
||||
func (l *Log) setup() error {
|
||||
files, err := ioutil.ReadDir(l.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var baseOffsets []uint64
|
||||
for _, file := range files {
|
||||
offStr := strings.TrimSuffix(
|
||||
file.Name(),
|
||||
path.Ext(file.Name()),
|
||||
)
|
||||
off, _ := strconv.ParseUint(offStr, 10, 0)
|
||||
baseOffsets = append(baseOffsets, off)
|
||||
}
|
||||
sort.Slice(baseOffsets, func(i, j int) bool {
|
||||
return baseOffsets[i] < baseOffsets[j]
|
||||
})
|
||||
|
||||
for i := 0; i < len(baseOffsets); i++ {
|
||||
if err := l.newSegment(baseOffsets[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
// There's a baseOffset for the store file and one for the index.
|
||||
// We only need to create the segment once per baseOffset.
|
||||
i++
|
||||
}
|
||||
if len(l.segments) == 0 {
|
||||
if err := l.newSegment(
|
||||
l.Config.Segment.InitialOffset,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *Log) Append(record *api.Record) (uint64, error) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
if l.activeSegment.IsMaxed() {
|
||||
err := l.newSegment(l.activeSegment.nextOffset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
off, err := l.activeSegment.Append(record)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (l *Log) Read(off uint64) (*api.Record, error) {
|
||||
l.mu.RLock()
|
||||
defer l.mu.RUnlock()
|
||||
var s *segment
|
||||
// Find the segment where the record is located.
|
||||
for _, segment := range l.segments {
|
||||
if segment.baseOffset <= off && off < segment.nextOffset {
|
||||
s = segment
|
||||
break
|
||||
}
|
||||
}
|
||||
if s == nil {
|
||||
return nil, fmt.Errorf("offset out range: %d", off)
|
||||
}
|
||||
return s.Read(off)
|
||||
}
|
||||
|
||||
func (l *Log) Close() error {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
for _, segment := range l.segments {
|
||||
if err := segment.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *Log) Remove() error {
|
||||
if err := l.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.RemoveAll(l.Dir)
|
||||
}
|
||||
|
||||
func (l *Log) Reset() error {
|
||||
if err := l.Remove(); err != nil {
|
||||
return err
|
||||
}
|
||||
return l.setup()
|
||||
}
|
||||
|
||||
func (l *Log) LowestOffset() (uint64, error) {
|
||||
l.mu.RLock()
|
||||
defer l.mu.RUnlock()
|
||||
return l.segments[0].baseOffset, nil
|
||||
}
|
||||
|
||||
func (l *Log) HighestOffset() (uint64, error) {
|
||||
l.mu.RLock()
|
||||
defer l.mu.RUnlock()
|
||||
off := l.segments[len(l.segments)-1].nextOffset
|
||||
if off == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
return off - 1, nil
|
||||
}
|
||||
|
||||
// Truncate deletes all segments whose highest offset is lower than lowest.
|
||||
// Guarantees that the record with offset "lowest" is kept in the log.
|
||||
func (l *Log) Truncate(lowest uint64) error {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
var segments []*segment
|
||||
for _, s := range l.segments {
|
||||
if (s.nextOffset - 1) < lowest {
|
||||
if err := s.Remove(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
segments = append(segments, s)
|
||||
}
|
||||
// What happends if we delete the whole log?
|
||||
// We'd end up with no segments and thus no active segment and subsequent
|
||||
// writes would fail.
|
||||
l.segments = segments
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *Log) Reader() io.Reader {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
readers := make([]io.Reader, len(l.segments))
|
||||
for i, segment := range l.segments {
|
||||
readers[i] = &originReader{segment.store, 0}
|
||||
}
|
||||
return io.MultiReader(readers...)
|
||||
}
|
||||
|
||||
type originReader struct {
|
||||
*store
|
||||
off int64 // Offset relative to the start of the store.
|
||||
}
|
||||
|
||||
func (o *originReader) Read(p []byte) (int, error) {
|
||||
n, err := o.ReadAt(p, o.off)
|
||||
o.off += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (l *Log) newSegment(off uint64) error {
|
||||
s, err := newSegment(l.Dir, off, l.Config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.segments = append(l.segments, s)
|
||||
l.activeSegment = s
|
||||
return nil
|
||||
}
|
||||
121
internal/log/log_test.go
Normal file
121
internal/log/log_test.go
Normal file
|
|
@ -0,0 +1,121 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
api "github.com/AYM1607/proglog/api/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestLog(t *testing.T) {
|
||||
for scenario, fn := range map[string]func(
|
||||
t *testing.T, log *Log,
|
||||
){
|
||||
"append and read a record suceeds": testAppendRead,
|
||||
"offset out of range error": testOutOfRangeErr,
|
||||
"init with existing segments": testInitExisting,
|
||||
"reader": testReader,
|
||||
"truncate": testTruncate,
|
||||
} {
|
||||
t.Run(scenario, func(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "store-test")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
c := Config{}
|
||||
// Guarantee that each segment will only have one record.
|
||||
c.Segment.MaxIndexBytes = entWidth
|
||||
log, err := NewLog(dir, c)
|
||||
require.NoError(t, err)
|
||||
|
||||
fn(t, log)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testAppendRead(t *testing.T, log *Log) {
|
||||
want := &api.Record{
|
||||
Value: []byte("hello world"),
|
||||
}
|
||||
off, err := log.Append(want)
|
||||
require.NoError(t, err, "Record is appended successfully")
|
||||
require.Equal(t, uint64(0), off, "First written record has offset 0")
|
||||
|
||||
read, err := log.Read(off)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want.Value, read.Value)
|
||||
require.Equal(t, off, read.Offset)
|
||||
}
|
||||
|
||||
func testOutOfRangeErr(t *testing.T, log *Log) {
|
||||
read, err := log.Read(10)
|
||||
require.Nil(t, read)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func testInitExisting(t *testing.T, o *Log) {
|
||||
record := &api.Record{
|
||||
Value: []byte("hello world"),
|
||||
}
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := o.Append(record)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, o.Close())
|
||||
|
||||
off, err := o.LowestOffset()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), off)
|
||||
off, err = o.HighestOffset()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), off)
|
||||
|
||||
n, err := NewLog(o.Dir, o.Config)
|
||||
require.NoError(t, err)
|
||||
|
||||
off, err = n.LowestOffset()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), off)
|
||||
off, err = n.HighestOffset()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), off)
|
||||
}
|
||||
|
||||
func testReader(t *testing.T, log *Log) {
|
||||
want := &api.Record{
|
||||
Value: []byte("hello world"),
|
||||
}
|
||||
off, err := log.Append(want)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), off)
|
||||
|
||||
reader := log.Reader()
|
||||
b, err := ioutil.ReadAll(reader)
|
||||
require.NoError(t, err)
|
||||
|
||||
read := &api.Record{}
|
||||
err = proto.Unmarshal(b[lenWidth:], read) // Ignore the bytes used to store the length of the record.
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want.Value, read.Value)
|
||||
}
|
||||
|
||||
func testTruncate(t *testing.T, log *Log) {
|
||||
record := &api.Record{
|
||||
Value: []byte("hello world"),
|
||||
}
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := log.Append(record)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err := log.Truncate(1)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = log.Read(0)
|
||||
require.Error(t, err)
|
||||
}
|
||||
127
internal/log/segment.go
Normal file
127
internal/log/segment.go
Normal file
|
|
@ -0,0 +1,127 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
api "github.com/AYM1607/proglog/api/v1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type segment struct {
|
||||
store *store
|
||||
index *index
|
||||
baseOffset uint64 // Absolute offset.
|
||||
nextOffset uint64 // Absolute offset.
|
||||
config Config
|
||||
}
|
||||
|
||||
func newSegment(dir string, baseOffset uint64, c Config) (*segment, error) {
|
||||
s := &segment{
|
||||
baseOffset: baseOffset,
|
||||
config: c,
|
||||
}
|
||||
storeF, err := os.OpenFile(
|
||||
path.Join(dir, fmt.Sprintf("%d%s", baseOffset, ".store")),
|
||||
os.O_RDWR|os.O_CREATE|os.O_APPEND,
|
||||
0644,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.store, err = newStore(storeF); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indexF, err := os.OpenFile(
|
||||
path.Join(dir, fmt.Sprintf("%d%s", baseOffset, ".index")),
|
||||
os.O_RDWR|os.O_CREATE,
|
||||
0644,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.index, err = newIndex(indexF, c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine the next offset by getting the last element in the index.
|
||||
// The `Read` call will return an error if the index is empty.
|
||||
off, _, err := s.index.Read(-1)
|
||||
if err != nil {
|
||||
s.nextOffset = s.baseOffset
|
||||
} else {
|
||||
s.nextOffset = s.baseOffset + uint64(off) + 1
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *segment) Append(record *api.Record) (offset uint64, err error) {
|
||||
recordOff := s.nextOffset
|
||||
|
||||
// Write record to the store.
|
||||
record.Offset = recordOff
|
||||
p, err := proto.Marshal(record)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
_, pos, err := s.store.Append(p)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Write record's position to the index.
|
||||
if err = s.index.Write(
|
||||
// Index's offsets are relative.
|
||||
uint32(recordOff-s.baseOffset),
|
||||
pos,
|
||||
); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
s.nextOffset++
|
||||
return recordOff, nil
|
||||
}
|
||||
|
||||
func (s *segment) Read(off uint64) (*api.Record, error) {
|
||||
_, pos, err := s.index.Read(int64(off - s.baseOffset))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p, err := s.store.Read(pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
record := &api.Record{}
|
||||
err = proto.Unmarshal(p, record)
|
||||
return record, err
|
||||
}
|
||||
|
||||
func (s *segment) IsMaxed() bool {
|
||||
return s.store.size >= s.config.Segment.MaxStoreBytes ||
|
||||
s.index.size >= s.config.Segment.MaxIndexBytes
|
||||
}
|
||||
|
||||
func (s *segment) Close() error {
|
||||
if err := s.index.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.store.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *segment) Remove() error {
|
||||
if err := s.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Remove(s.store.Name()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Remove(s.index.Name()); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
68
internal/log/segment_test.go
Normal file
68
internal/log/segment_test.go
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
api "github.com/AYM1607/proglog/api/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
baseOff uint64 = 16
|
||||
)
|
||||
|
||||
func TestSegment(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "segmet_test")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
want := &api.Record{Value: []byte("hello world!")}
|
||||
|
||||
// Index-limited config.
|
||||
c := Config{}
|
||||
c.Segment.MaxStoreBytes = 1024
|
||||
c.Segment.MaxIndexBytes = entWidth * 3
|
||||
|
||||
s, err := newSegment(dir, baseOff, c)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, baseOff, s.nextOffset, "next offset is the base offset for an empty segment")
|
||||
require.False(t, s.IsMaxed())
|
||||
|
||||
for i := uint64(0); i < 3; i++ {
|
||||
off, err := s.Append(want)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, baseOff+i, off)
|
||||
|
||||
got, err := s.Read(off)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want.Value, got.Value)
|
||||
}
|
||||
|
||||
_, err = s.Append(want)
|
||||
require.True(t, s.IsMaxed())
|
||||
require.Equal(t, io.EOF, err, "Append fails when the index is full")
|
||||
|
||||
// Store-limited config.
|
||||
// This is not really accurate. The Marshalled record with the added bytes
|
||||
// for the length will be longer that just the length of the value in bytes.
|
||||
// If more fields are added to the record, 2 could cause the store to fill up
|
||||
// and this test would fail.
|
||||
c.Segment.MaxStoreBytes = uint64(len(want.Value) * 3)
|
||||
c.Segment.MaxIndexBytes = 1024
|
||||
|
||||
// Create from the existing files.
|
||||
s, err = newSegment(dir, baseOff, c)
|
||||
require.NoError(t, err)
|
||||
require.True(t, s.IsMaxed())
|
||||
|
||||
err = s.Remove()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Re-create files.
|
||||
s, err = newSegment(dir, baseOff, c)
|
||||
require.NoError(t, err)
|
||||
require.False(t, s.IsMaxed())
|
||||
}
|
||||
96
internal/log/store.go
Normal file
96
internal/log/store.go
Normal file
|
|
@ -0,0 +1,96 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var enc = binary.BigEndian
|
||||
|
||||
const lenWidth = 8 // Bytes used to store the length of a record.
|
||||
|
||||
type store struct {
|
||||
*os.File
|
||||
mu sync.Mutex
|
||||
buf *bufio.Writer
|
||||
size uint64
|
||||
}
|
||||
|
||||
func newStore(f *os.File) (*store, error) {
|
||||
fi, err := os.Stat(f.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
size := uint64(fi.Size())
|
||||
return &store{
|
||||
File: f,
|
||||
size: size,
|
||||
buf: bufio.NewWriter(f),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *store) Append(p []byte) (n uint64, pos uint64, err error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
pos = s.size // Writing the record starting at the end of the file.
|
||||
|
||||
// Write the length of the record.
|
||||
if err := binary.Write(s.buf, enc, uint64(len(p))); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
w, err := s.buf.Write(p)
|
||||
// There's a potential problem here, the call to write could return an error,
|
||||
// but still have written a partial set of the data to the buffer. This could
|
||||
// introduce inconsistencies at read time.
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
w += lenWidth
|
||||
s.size += uint64(w)
|
||||
return uint64(w), pos, nil
|
||||
}
|
||||
|
||||
func (s *store) Read(pos uint64) ([]byte, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if err := s.buf.Flush(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rawSize := make([]byte, lenWidth)
|
||||
if _, err := s.File.ReadAt(rawSize, int64(pos)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
size := enc.Uint64(rawSize)
|
||||
|
||||
b := make([]byte, size)
|
||||
if _, err := s.File.ReadAt(b, int64(pos+lenWidth)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (s *store) ReadAt(p []byte, off int64) (int, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if err := s.buf.Flush(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return s.File.ReadAt(p, off)
|
||||
}
|
||||
|
||||
func (s *store) Close() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
err := s.buf.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.File.Close()
|
||||
}
|
||||
114
internal/log/store_test.go
Normal file
114
internal/log/store_test.go
Normal file
|
|
@ -0,0 +1,114 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
	// write is the record payload appended by every test.
	write = []byte("hello world")
	// width is the on-disk footprint of one record: the payload plus the
	// lenWidth-byte length prefix.
	width = uint64(len(write)) + lenWidth
)

// rCnt is the number of records the tests append and read back.
const rCnt = 3
|
||||
|
||||
// TestStoreAppendRead appends records to a fresh store, reads them back via
// Read and ReadAt, then rebuilds a store over the same file to verify the
// data survives a restart.
func TestStoreAppendRead(t *testing.T) {
	f, err := ioutil.TempFile("", "store_append_read_test")
	require.NoError(t, err)
	defer os.Remove(f.Name())

	s, err := newStore(f)
	require.NoError(t, err)

	// Test basic operations on the store.
	testAppend(t, s)
	testRead(t, s)
	testReadAt(t, s)

	// Test that the store can be reconstructed from an existing file.
	s, err = newStore(f)
	require.NoError(t, err)
	testRead(t, s)
}
|
||||
|
||||
func testAppend(t *testing.T, s *store) {
|
||||
t.Helper()
|
||||
for i := uint64(1); i <= rCnt; i++ {
|
||||
n, pos, err := s.Append(write)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, width*i, pos+n, "Bytes written to the store file must be the length of the message + `lenWidth` bytes for length")
|
||||
}
|
||||
}
|
||||
|
||||
// testRead ensures `rCnt` records were written to the store and the contet matches `write`.
|
||||
func testRead(t *testing.T, s *store) {
|
||||
t.Helper()
|
||||
var pos uint64
|
||||
for i := uint64(1); i <= rCnt; i++ {
|
||||
read, err := s.Read(pos)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, write, read, "Record value should match the written one.")
|
||||
pos += width
|
||||
}
|
||||
}
|
||||
|
||||
func testReadAt(t *testing.T, s *store) {
|
||||
t.Helper()
|
||||
for i, off := uint64(1), int64(0); i <= rCnt; i++ {
|
||||
// Read record size.
|
||||
b := make([]byte, lenWidth)
|
||||
n, err := s.ReadAt(b, off)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lenWidth, n)
|
||||
size := enc.Uint64(b)
|
||||
off += int64(n)
|
||||
|
||||
// Read record content.
|
||||
b = make([]byte, size)
|
||||
n, err = s.ReadAt(b, off)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, write, b)
|
||||
off += int64(n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClose(t *testing.T) {
|
||||
f, err := ioutil.TempFile("", "store_close_test")
|
||||
require.NoError(t, err)
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
s, err := newStore(f)
|
||||
require.NoError(t, err)
|
||||
|
||||
f, beforeSize, err := openFile(f.Name())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = s.Append(write)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, afterSize, err := openFile(f.Name())
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, afterSize, beforeSize)
|
||||
}
|
||||
|
||||
func openFile(fn string) (file *os.File, size int64, err error) {
|
||||
f, err := os.OpenFile(
|
||||
fn,
|
||||
os.O_RDWR|os.O_CREATE|os.O_APPEND,
|
||||
0644,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
return f, fi.Size(), nil
|
||||
}
|
||||
12
prototype/cmd/server/main.go
Normal file
12
prototype/cmd/server/main.go
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/AYM1607/proglog/prototype/internal/server"
|
||||
)
|
||||
|
||||
func main() {
|
||||
srv := server.NewHTPPServer(":8080")
|
||||
log.Fatal(srv.ListenAndServe())
|
||||
}
|
||||
95
prototype/internal/server/http.go
Normal file
95
prototype/internal/server/http.go
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func NewHTPPServer(addr string) *http.Server {
|
||||
controller := newHTTPController()
|
||||
r := mux.NewRouter()
|
||||
|
||||
r.HandleFunc("/", controller.handleProduce).Methods("POST")
|
||||
r.HandleFunc("/", controller.handleConsume).Methods("GET")
|
||||
|
||||
return &http.Server{
|
||||
Addr: addr,
|
||||
Handler: r,
|
||||
}
|
||||
}
|
||||
|
||||
// httpController holds the state shared by the HTTP handlers: the
// in-memory log that produce and consume requests operate on.
type httpController struct {
	Log *Log
}
|
||||
|
||||
func newHTTPController() *httpController {
|
||||
return &httpController{
|
||||
Log: NewLog(),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *httpController) handleProduce(w http.ResponseWriter, r *http.Request) {
|
||||
var req ProduceRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
off, err := c.Log.Append(req.Record)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
res := ProduceResponse{Offset: off}
|
||||
// Writing directly to the response writer.
|
||||
err = json.NewEncoder(w).Encode(res)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *httpController) handleConsume(w http.ResponseWriter, r *http.Request) {
|
||||
var req ConsumeRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
record, err := c.Log.Read(req.Offset)
|
||||
if err == ErrOffsetNotFound {
|
||||
http.Error(w, err.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
res := ConsumeResponse{Record: record}
|
||||
// Writing directly to the response writer.
|
||||
err = json.NewEncoder(w).Encode(res)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ProduceRequest is the JSON body of a produce (append) call.
type ProduceRequest struct {
	Record Record `json:"record"`
}

// ProduceResponse reports the offset the appended record was stored at.
type ProduceResponse struct {
	Offset uint64 `json:"offset"`
}

// ConsumeRequest is the JSON body of a consume (read) call.
type ConsumeRequest struct {
	Offset uint64 `json:"offset"`
}

// ConsumeResponse carries the record found at the requested offset.
type ConsumeResponse struct {
	Record Record `json:"record"`
}
|
||||
39
prototype/internal/server/log.go
Normal file
39
prototype/internal/server/log.go
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrOffsetNotFound is returned by Log.Read when the requested offset is
// past the end of the log.
var ErrOffsetNotFound = errors.New("offset not found")

// Record is a single log entry: an opaque value plus the offset it was
// assigned at append time.
type Record struct {
	Value  []byte `json:"value"`
	Offset uint64 `json:"offset"`
}

// Log is an in-memory, append-only sequence of records, safe for
// concurrent use.
type Log struct {
	mu      sync.Mutex
	records []Record
}

// NewLog returns an empty log.
func NewLog() *Log {
	return &Log{}
}

// Append stamps record with the next offset, stores it, and returns that
// offset.
func (l *Log) Append(record Record) (uint64, error) {
	l.mu.Lock()
	defer l.mu.Unlock()

	next := uint64(len(l.records))
	record.Offset = next
	l.records = append(l.records, record)
	return next, nil
}

// Read returns the record stored at offset, or ErrOffsetNotFound if no
// record with that offset exists.
func (l *Log) Read(offset uint64) (Record, error) {
	l.mu.Lock()
	defer l.mu.Unlock()

	if offset >= uint64(len(l.records)) {
		return Record{}, ErrOffsetNotFound
	}
	return l.records[offset], nil
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue