mirror of https://github.com/nxshock/zkv.git synced 2024-11-27 11:21:02 +05:00

Compare commits


4 Commits

SHA1 Message Date
0458ac5152 Add info about read value issue 2022-12-10 22:00:42 +05:00
28f43e56d5 Add resource consumption block 2022-12-10 22:00:08 +05:00
82a36a1b9e Add separate index file option 2022-12-10 21:39:24 +05:00
533eddaed4 Fix typo 2022-12-10 21:34:16 +05:00
5 changed files with 122 additions and 6 deletions

View File

@@ -10,10 +10,11 @@ Simple key-value store for single-user applications.
## Cons
* Index stored in memory (`map[key hash (28 bytes)]file offset (int64)`) - average 200-250 Mb of RAM per 1M keys
* Need to read the whole file on store open to create file index
* Index stored in memory (`map[key hash (28 bytes)]file offset (int64)`)
* Need to read the whole file on store open to create file index (you can use the index file option to avoid this)
* No way to recover disk space from deleted records
* Write/Delete operations block Read operations and each other
* Need to decode the whole file up to the stored value
## Usage
@@ -47,6 +48,28 @@ err = db.Flush()
err = db.Backup("new/file/path")
```
## Store options
```go
type Options struct {
	// Maximum number of concurrent reads
	MaxParallelReads int

	// Compression level
	CompressionLevel zstd.EncoderLevel

	// Memory write buffer size in bytes
	MemoryBufferSize int

	// Disk write buffer size in bytes
	DiskBufferSize int

	// Use index file
	UseIndexFile bool
}
```
## File structure
Record is an `encoding/gob` structure:
@@ -64,7 +87,14 @@ File is a log-structured list of commands:
| Length | Record body bytes length | int64 |
| Body | Gob-encoded record | variable |
## Resource consumption
Store requirements:
* around 300 Mb of RAM per 1 million keys
* around 34 Mb of disk space for the index file per 1 million keys
## TODO
- [ ] Implement optional separate index file to speed up store initialization
- [ ] Add recovery of the previous store file state on write error
- [ ] Add fast file seek to value (add compressed block start position)
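
The README changes above introduce the `UseIndexFile` switch next to the existing store options. A minimal usage sketch follows; it assumes the module import path matches the mirror URL (`github.com/nxshock/zkv`) and that `zstd` refers to `github.com/klauspost/compress/zstd`, neither of which is stated in this diff:

```go
package main

import (
	"log"

	"github.com/klauspost/compress/zstd" // assumed source of zstd.EncoderLevel
	"github.com/nxshock/zkv"             // assumed module path, taken from the mirror URL
)

func main() {
	// Every field from the new "Store options" section is set explicitly;
	// zero-valued fields would otherwise fall back to the library defaults.
	opts := zkv.Options{
		MaxParallelReads: 4,
		CompressionLevel: zstd.SpeedDefault,
		MemoryBufferSize: 4 * 1024 * 1024,
		DiskBufferSize:   1 * 1024 * 1024,
		UseIndexFile:     true, // maintain the side-car ".idx" file
	}

	db, err := zkv.OpenWithOptions("data.zkv", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Set("answer", 42); err != nil {
		log.Fatal(err)
	}

	var value int
	if err := db.Get("answer", &value); err != nil {
		log.Fatal(err)
	}
	log.Println(value) // 42
}
```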

View File

@@ -11,4 +11,7 @@ var defaultOptions = Options{
	CompressionLevel: zstd.SpeedDefault,
	MemoryBufferSize: 4 * 1024 * 1024,
	DiskBufferSize: 1 * 1024 * 1024,
	UseIndexFile: false,
}
const indexFileExt = ".idx"
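
The defaults keep the index file opt-in (`UseIndexFile: false`), and the new `indexFileExt` constant pins down where the optional index lives: next to the store file, with an `.idx` suffix. A throwaway illustration of that naming rule (the helper is hypothetical, not part of the package):

```go
package main

import "fmt"

// indexPathFor is a hypothetical helper that only spells out the naming
// rule used by the package: index path = store path + ".idx".
func indexPathFor(storePath string) string {
	return storePath + ".idx"
}

func main() {
	fmt.Println(indexPathFor("data.zkv")) // data.zkv.idx
}
```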

View File

@@ -12,8 +12,11 @@ type Options struct {
	// Memory write buffer size in bytes
	MemoryBufferSize int

	// Diwk write buffer size in bytes
	// Disk write buffer size in bytes
	DiskBufferSize int

	// Use index file
	UseIndexFile bool
}
func (o *Options) setDefaults() {
@@ -24,4 +27,12 @@ func (o *Options) setDefaults() {
	if o.CompressionLevel == 0 {
		o.CompressionLevel = defaultOptions.CompressionLevel
	}

	if o.MemoryBufferSize == 0 {
		o.MemoryBufferSize = defaultOptions.MemoryBufferSize
	}

	if o.DiskBufferSize == 0 {
		o.DiskBufferSize = defaultOptions.DiskBufferSize
	}
}
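
The `setDefaults` additions follow the pattern already used for the other fields: every zero-valued option is backfilled from `defaultOptions`, so callers only have to set what they care about (the test further below passes just `Options{UseIndexFile: true}`). A self-contained sketch of the same idiom, with simplified names rather than the package's actual types:

```go
package main

import "fmt"

type options struct {
	MemoryBufferSize int
	DiskBufferSize   int
}

var defaults = options{
	MemoryBufferSize: 4 * 1024 * 1024,
	DiskBufferSize:   1 * 1024 * 1024,
}

// setDefaults backfills every zero-valued field from the package defaults,
// mirroring the checks added in the hunk above.
func (o *options) setDefaults() {
	if o.MemoryBufferSize == 0 {
		o.MemoryBufferSize = defaults.MemoryBufferSize
	}
	if o.DiskBufferSize == 0 {
		o.DiskBufferSize = defaults.DiskBufferSize
	}
}

func main() {
	o := options{DiskBufferSize: 512 * 1024} // caller sets a single field
	o.setDefaults()
	fmt.Println(o.MemoryBufferSize, o.DiskBufferSize) // 4194304 524288
}
```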

zkv.go
View File

@@ -5,6 +5,7 @@ import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"encoding/gob"
	"fmt"
	"io"
	"os"
@@ -41,6 +42,16 @@ func OpenWithOptions(filePath string, options Options) (*Store, error) {
		options: options,
		readOrderChan: make(chan struct{}, int(options.MaxParallelReads))}

	if options.UseIndexFile {
		idxFile, err := os.Open(filePath + indexFileExt)
		if err == nil {
			err = gob.NewDecoder(idxFile).Decode(&database.dataOffset)
			if err == nil {
				return database, nil
			}
		}
	}

	// restore file data
	readF, err := os.Open(filePath)
	if os.IsNotExist(err) {
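
The hunk above is the fast-open path: with `UseIndexFile` set, `OpenWithOptions` first tries to gob-decode the key-offset map from the side-car `.idx` file and returns immediately on success; any error simply falls through to the full-file restore. A standalone sketch of the same load-or-fall-back logic (the 28-byte key type comes from the README's description of the index, not from this diff):

```go
package main

import (
	"encoding/gob"
	"fmt"
	"os"
)

// loadIndex tries the fast path: decode the offset map from path+".idx".
// On any error it reports false so the caller can fall back to scanning
// the data file, mirroring the logic added to OpenWithOptions.
func loadIndex(filePath string) (map[[28]byte]int64, bool) {
	f, err := os.Open(filePath + ".idx")
	if err != nil {
		return nil, false
	}
	defer f.Close()

	offsets := make(map[[28]byte]int64)
	if err := gob.NewDecoder(f).Decode(&offsets); err != nil {
		return nil, false
	}
	return offsets, true
}

func main() {
	if offsets, ok := loadIndex("data.zkv"); ok {
		fmt.Println("index loaded,", len(offsets), "keys")
	} else {
		fmt.Println("no usable index, full scan needed")
	}
}
```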
@@ -81,7 +92,8 @@ func OpenWithOptions(filePath string, options Options) (*Store, error) {
}
func Open(filePath string) (*Store, error) {
	return OpenWithOptions(filePath, defaultOptions)
	options := defaultOptions
	return OpenWithOptions(filePath, options)
}
func (s *Store) Set(key, value interface{}) error {
@@ -416,5 +428,20 @@ func (s *Store) flush() error {
		return err
	}

	// Update index file only on data update
	if s.options.UseIndexFile && l > 0 {
		idxBuf := new(bytes.Buffer)

		err = gob.NewEncoder(idxBuf).Encode(s.dataOffset)
		if err != nil {
			return err
		}

		err = os.WriteFile(s.filePath+indexFileExt, idxBuf.Bytes(), 0644)
		if err != nil {
			return err
		}
	}

	return nil
}
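
On the write side, `flush()` now re-encodes the whole offset map and rewrites the `.idx` file, but only when the flush actually moved data (the `l > 0` guard). Encoding into an in-memory buffer before the single `os.WriteFile` call means a failed encode cannot truncate an existing index. A mirror-image sketch of that save path (again assuming the 28-byte key type described in the README):

```go
package main

import (
	"bytes"
	"encoding/gob"
	"log"
	"os"
)

// saveIndex mirrors the flush() change: gob-encode the offset map into a
// buffer first, then write the side-car file in one call, so a failed
// encode leaves any existing index untouched.
func saveIndex(filePath string, offsets map[[28]byte]int64) error {
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(offsets); err != nil {
		return err
	}
	return os.WriteFile(filePath+".idx", buf.Bytes(), 0644)
}

func main() {
	if err := saveIndex("data.zkv", map[[28]byte]int64{}); err != nil {
		log.Fatal(err)
	}
}
```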

View File

@@ -327,5 +327,50 @@ func TestBackupWithDeletedRecords(t *testing.T) {
	err = db.Close()
	assert.NoError(t, err)
}

func TestIndexFileBasic(t *testing.T) {
	const filePath = "TestReadWriteBasic.zkv"
	const recordCount = 100

	defer os.Remove(filePath)
	defer os.Remove(filePath + indexFileExt)

	db, err := OpenWithOptions(filePath, Options{UseIndexFile: true})
	assert.NoError(t, err)

	for i := 1; i <= recordCount; i++ {
		err = db.Set(i, i)
		assert.NoError(t, err)
	}

	assert.Len(t, db.dataOffset, 0)
	assert.Len(t, db.bufferDataOffset, recordCount)

	for i := 1; i <= recordCount; i++ {
		var gotValue int
		err = db.Get(i, &gotValue)
		assert.NoError(t, err)
		assert.Equal(t, i, gotValue)
	}

	err = db.Close()
	assert.NoError(t, err)

	// try to read
	db, err = OpenWithOptions(filePath, Options{UseIndexFile: true})
	assert.NoError(t, err)

	assert.Len(t, db.dataOffset, recordCount)

	for i := 1; i <= recordCount; i++ {
		var gotValue int
		err = db.Get(i, &gotValue)
		assert.NoError(t, err)
		assert.Equal(t, i, gotValue)
	}

	err = db.Close()
	assert.NoError(t, err)
}
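
The new test pins down the intended behaviour: while records sit only in the write buffer they are tracked in `bufferDataOffset` and `dataOffset` stays empty, and only after `Close` (which flushes and writes the `.idx` file) does reopening with `UseIndexFile` restore a fully populated `dataOffset` without replaying the data file. Assuming a standard Go toolchain, the case can be run in isolation with `go test -run TestIndexFileBasic`.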