mirror of https://github.com/nxshock/zkv.git, synced 2025-04-20 09:21:50 +05:00

Compare commits

No commits in common. "d950b6546c1fba17c5ee980e4235f3d424b7ac98" and "0458ac515222c3c841b3370dff03e89565042a18" have entirely different histories.

d950b6546c...0458ac5152

README.md (22 changed lines)
```diff
@@ -4,17 +4,17 @@ Simple key-value store for single-user applications.
 ## Pros
 
-* Simple two file structure (data file and index file)
+* Simple one file structure
 * Internal Zstandard compression by [klauspost/compress/zstd](https://github.com/klauspost/compress/tree/master/zstd)
 * Threadsafe operations through `sync.RWMutex`
 
 ## Cons
 
 * Index stored in memory (`map[key hash (28 bytes)]file offset (int64)`)
-* No transaction system
-* Index file is fully rewrited on every store commit
+* Need to read the whole file on store open to create file index (you can use index file options to avoid this)
 * No way to recover disk space from deleted records
 * Write/Delete operations block Read and each other operations
+* Need to decode whole file until stored value
 
 ## Usage
 
```
````diff
@@ -63,6 +63,9 @@ type Options struct {
 	// Disk write buffer size in bytes
 	DiskBufferSize int
 
+	// Use index file
+	UseIndexFile bool
+
 }
 
 ```
````
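The new `UseIndexFile` option is opt-in. A minimal sketch of opening a store with it; the `Set`/`Get`/`Close` signatures are assumptions based on the README's usage section, which this compare does not show:

```go
package main

import "github.com/nxshock/zkv"

func main() {
	// UseIndexFile defaults to false; enable it explicitly.
	db, err := zkv.OpenWithOptions("test.zkv", zkv.Options{UseIndexFile: true})
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Set/Get are assumed from the README's elided usage section.
	if err := db.Set("key", 123); err != nil {
		panic(err)
	}

	var value int
	if err := db.Get("key", &value); err != nil {
		panic(err)
	}
}
```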
````diff
@@ -84,17 +87,6 @@ File is log stuctured list of commands:
 | Length | Record body bytes length | int64    |
 | Body   | Gob-encoded record       | variable |
 
-Index file is simple gob-encoded map:
-
-```go
-map[string]struct {
-	BlockOffset  int64
-	RecordOffset int64
-}
-```
-
-where map key is data key hash and value - data offset in data file.
-
 ## Resource consumption
 
 Store requirements:
````
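For context on the table above: each record is a length prefix followed by a gob-encoded body, all inside one Zstandard stream. A hedged sketch of decoding a single record; the `Record` field names come from the zkv.go diff below, while the big-endian int64 length encoding and the `uint8` record type are assumptions:

```go
package sketch

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"encoding/gob"
	"io"
)

// Record mirrors the fields used in the zkv.go diff below.
type Record struct {
	Type       uint8 // RecordTypeSet / RecordTypeDelete
	KeyHash    [sha256.Size224]byte
	ValueBytes []byte
}

// readRecord decodes one length-prefixed record and reports how many
// bytes it consumed.
func readRecord(r io.Reader) (int64, *Record, error) {
	var length int64
	if err := binary.Read(r, binary.BigEndian, &length); err != nil {
		return 0, nil, err
	}
	body := make([]byte, length)
	if _, err := io.ReadFull(r, body); err != nil {
		return 0, nil, err
	}
	var rec Record
	if err := gob.NewDecoder(bytes.NewReader(body)).Decode(&rec); err != nil {
		return 0, nil, err
	}
	return 8 + length, &rec, nil // 8-byte prefix + body
}
```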
```diff
@@ -105,4 +97,4 @@ Store requirements:
 ## TODO
 
 - [ ] Add recovery previous state of store file on write error
-- [ ] Add method for index rebuild
+- [ ] Add fast file seek to value (add compressed block start position)
```
Binary file not shown.
```diff
@@ -11,7 +11,7 @@ var defaultOptions = Options{
 	CompressionLevel: zstd.SpeedDefault,
 	MemoryBufferSize: 4 * 1024 * 1024,
 	DiskBufferSize:   1 * 1024 * 1024,
-	useIndexFile:     true,
+	UseIndexFile:     false,
 }
 
 const indexFileExt = ".idx"
```
```diff
@@ -16,12 +16,10 @@ type Options struct {
 	DiskBufferSize int
 
 	// Use index file
-	useIndexFile bool
+	UseIndexFile bool
 }
 
 func (o *Options) setDefaults() {
-	o.useIndexFile = true // TODO: implement database search without index
-
 	if o.MaxParallelReads == 0 {
 		o.MaxParallelReads = defaultOptions.MaxParallelReads
 	}
```
zkv.go (87 changed lines)
```diff
@@ -14,15 +14,11 @@ import (
 	"github.com/klauspost/compress/zstd"
 )
 
-type Offsets struct {
-	BlockOffset  int64
-	RecordOffset int64
-}
-
 type Store struct {
-	dataOffset map[string]Offsets
+	dataOffset map[string]int64
 
 	filePath string
+	offset   int64
 
 	buffer           *bytes.Buffer
 	bufferDataOffset map[string]int64
```
```diff
@@ -38,14 +34,15 @@ func OpenWithOptions(filePath string, options Options) (*Store, error) {
 	options.setDefaults()
 
 	database := &Store{
-		dataOffset:       make(map[string]Offsets),
+		dataOffset:       make(map[string]int64),
 		bufferDataOffset: make(map[string]int64),
+		offset:           0,
 		buffer:           new(bytes.Buffer),
 		filePath:         filePath,
 		options:          options,
 		readOrderChan:    make(chan struct{}, int(options.MaxParallelReads))}
 
-	if options.useIndexFile {
+	if options.UseIndexFile {
 		idxFile, err := os.Open(filePath + indexFileExt)
 		if err == nil {
 			err = gob.NewDecoder(idxFile).Decode(&database.dataOffset)
```
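The index file read here is plain `encoding/gob`: the map is decoded directly into `database.dataOffset`, and flush (below) encodes the same map back out. A self-contained sketch of that round trip using the new `map[string]int64` index shape; the placeholder key stands in for a real 28-byte hash:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

func main() {
	// Write side (cf. flush below): gob-encode the in-memory index.
	index := map[string]int64{"<28-byte key hash>": 1040}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(index); err != nil {
		panic(err)
	}

	// Read side (cf. OpenWithOptions above): decode straight back
	// into an empty map, as done with database.dataOffset.
	restored := make(map[string]int64)
	if err := gob.NewDecoder(&buf).Decode(&restored); err != nil {
		panic(err)
	}
	fmt.Println(restored["<28-byte key hash>"]) // 1040
}
```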
```diff
@@ -83,7 +80,7 @@ func OpenWithOptions(filePath string, options Options) (*Store, error) {
 
 		switch record.Type {
 		case RecordTypeSet:
-			database.dataOffset[string(record.KeyHash[:])] = Offsets{} // offset
+			database.dataOffset[string(record.KeyHash[:])] = offset
 		case RecordTypeDelete:
 			delete(database.dataOffset, string(record.KeyHash[:]))
 		}
```
```diff
@@ -286,7 +283,7 @@ func (s *Store) getGobBytes(keyHash [sha256.Size224]byte) ([]byte, error) {
 		return record.ValueBytes, nil
 	}
 
-	offsets, exists := s.dataOffset[string(keyHash[:])]
+	offset, exists = s.dataOffset[string(keyHash[:])]
 	if !exists {
 		return nil, ErrNotExists
 	}
```
```diff
@@ -297,18 +294,13 @@ func (s *Store) getGobBytes(keyHash [sha256.Size224]byte) ([]byte, error) {
 	}
 	defer readF.Close()
 
-	_, err = readF.Seek(offsets.BlockOffset, io.SeekStart)
-	if err != nil {
-		return nil, err
-	}
-
 	decompressor, err := zstd.NewReader(readF)
 	if err != nil {
 		return nil, err
 	}
 	defer decompressor.Close()
 
-	err = skip(decompressor, offsets.RecordOffset)
+	err = skip(decompressor, offset)
 	if err != nil {
 		return nil, err
 	}
```
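`skip` itself is not shown in this compare; a plausible implementation (an assumption, not the repo's code) simply discards `n` bytes from the reader. Because the reader here is the zstd decompressor, skipping still decompresses everything before the target record, which is the "need to decode whole file until stored value" con from the README:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// skip is a hypothetical stand-in for the helper called above: it
// discards n bytes from r. With a zstd decompressor as r, those bytes
// are still decompressed before being thrown away.
func skip(r io.Reader, n int64) error {
	_, err := io.CopyN(io.Discard, r, n)
	return err
}

func main() {
	r := strings.NewReader("0123456789")
	if err := skip(r, 4); err != nil {
		panic(err)
	}
	rest, _ := io.ReadAll(r)
	fmt.Println(string(rest)) // 456789
}
```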
```diff
@@ -325,6 +317,7 @@ func (s *Store) getGobBytes(keyHash [sha256.Size224]byte) ([]byte, error) {
 	}
 
 	return record.ValueBytes, nil
+
 }
 
 func (s *Store) get(key, value interface{}) error {
```
```diff
@@ -336,12 +329,57 @@ func (s *Store) get(key, value interface{}) error {
 		return err
 	}
 
-	b, err := s.getGobBytes(hashToFind)
+	offset, exists := s.bufferDataOffset[string(hashToFind[:])]
+	if exists {
+		reader := bytes.NewReader(s.buffer.Bytes())
+
+		err = skip(reader, offset)
+		if err != nil {
+			return err
+		}
+
+		_, record, err := readRecord(reader)
+		if err != nil {
+			return err
+		}
+
+		return decode(record.ValueBytes, value)
+	}
+
+	offset, exists = s.dataOffset[string(hashToFind[:])]
+	if !exists {
+		return ErrNotExists
+	}
+
+	readF, err := os.Open(s.filePath)
 	if err != nil {
 		return err
 	}
+	defer readF.Close()
 
-	return decode(b, value)
+	decompressor, err := zstd.NewReader(readF)
+	if err != nil {
+		return err
+	}
+	defer decompressor.Close()
+
+	err = skip(decompressor, offset)
+	if err != nil {
+		return err
+	}
+
+	_, record, err := readRecord(decompressor)
+	if err != nil {
+		return err
+	}
+
+	if !bytes.Equal(record.KeyHash[:], hashToFind[:]) {
+		expectedHashStr := base64.StdEncoding.EncodeToString(hashToFind[:])
+		gotHashStr := base64.StdEncoding.EncodeToString(record.KeyHash[:])
+		return fmt.Errorf("wrong hash of offset %d: expected %s, got %s", offset, expectedHashStr, gotHashStr)
+	}
+
+	return decode(record.ValueBytes, value)
 }
 
 func (s *Store) flush() error {
```
```diff
@@ -351,18 +389,13 @@ func (s *Store) flush() error {
 	if err != nil {
 		return fmt.Errorf("open store file: %v", err)
 	}
-	stat, err := f.Stat()
-	if err != nil {
-		f.Close()
-		return fmt.Errorf("stat store file: %v", err)
-	}
 
 	diskWriteBuffer := bufio.NewWriterSize(f, s.options.DiskBufferSize)
 
 	encoder, err := zstd.NewWriter(diskWriteBuffer, zstd.WithEncoderLevel(s.options.CompressionLevel))
 	if err != nil {
 		f.Close()
-		return fmt.Errorf("init encoder: %v", err)
+		return fmt.Errorf("open store file: %v", err)
 	}
 
 	_, err = s.buffer.WriteTo(encoder)
```
```diff
@@ -371,11 +404,13 @@ func (s *Store) flush() error {
 	}
 
 	for key, val := range s.bufferDataOffset {
-		s.dataOffset[key] = Offsets{BlockOffset: stat.Size(), RecordOffset: val}
+		s.dataOffset[key] = val + s.offset
 	}
 
 	s.bufferDataOffset = make(map[string]int64)
 
+	s.offset += l
+
 	err = encoder.Close()
 	if err != nil {
 		// TODO: truncate file to previous state
```
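The arithmetic in `val + s.offset`: `val` is a record's offset relative to the start of the memory buffer, and `s.offset` is the number of uncompressed bytes flushed so far, so their sum is the record's absolute position in the uncompressed stream; `s.offset += l` then advances the running total. With made-up numbers:

```go
package main

import "fmt"

func main() {
	// Illustrative numbers only, not taken from the repo.
	var sOffset int64 = 1000 // s.offset: uncompressed bytes flushed so far
	var val int64 = 40       // record offset inside the current buffer
	var l int64 = 200        // bytes about to be flushed from the buffer

	absolute := val + sOffset // what flush stores in s.dataOffset
	fmt.Println(absolute)     // 1040

	sOffset += l         // s.offset += l
	fmt.Println(sOffset) // 1200: base offset for the next buffer's records
}
```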
```diff
@@ -394,7 +429,7 @@ func (s *Store) flush() error {
 	}
 
 	// Update index file only on data update
-	if s.options.useIndexFile && l > 0 {
+	if s.options.UseIndexFile && l > 0 {
 		idxBuf := new(bytes.Buffer)
 
 		err = gob.NewEncoder(idxBuf).Encode(s.dataOffset)
```
zkv_test.go (15 changed lines)
```diff
@@ -39,7 +39,6 @@ func TestReadWriteBasic(t *testing.T) {
 	const filePath = "TestReadWriteBasic.zkv"
 	const recordCount = 100
 	defer os.Remove(filePath)
-	defer os.Remove(filePath + indexFileExt)
 
 	db, err := Open(filePath)
 	assert.NoError(t, err)
@@ -85,7 +84,6 @@ func TestSmallWrites(t *testing.T) {
 	const filePath = "TestSmallWrites.zkv"
 	const recordCount = 100
 	defer os.Remove(filePath)
-	defer os.Remove(filePath + indexFileExt)
 
 	for i := 1; i <= recordCount; i++ {
 		db, err := Open(filePath)
@@ -121,7 +119,6 @@ func TestDeleteBasic(t *testing.T) {
 	const filePath = "TestDeleteBasic.zkv"
 	const recordCount = 100
 	defer os.Remove(filePath)
-	defer os.Remove(filePath + indexFileExt)
 
 	db, err := Open(filePath)
 	assert.NoError(t, err)
@@ -167,7 +164,6 @@ func TestDeleteBasic(t *testing.T) {
 func TestBufferBasic(t *testing.T) {
 	const filePath = "TestBuffer.zkv"
 	defer os.Remove(filePath)
-	defer os.Remove(filePath + indexFileExt)
 
 	db, err := OpenWithOptions(filePath, Options{MemoryBufferSize: 100})
 	assert.NoError(t, err)
@@ -191,9 +187,8 @@ func TestBufferBasic(t *testing.T) {
 
 func TestBufferRead(t *testing.T) {
 	const filePath = "TestBufferRead.zkv"
-	const recordCount = 2
+	const recordCount = 100
 	defer os.Remove(filePath)
-	defer os.Remove(filePath + indexFileExt)
 
 	db, err := OpenWithOptions(filePath, Options{MemoryBufferSize: 100})
 	assert.NoError(t, err)
@@ -246,9 +241,7 @@ func TestBackupBasic(t *testing.T) {
 	const newFilePath = "TestBackupBasic2.zkv"
 	const recordCount = 100
 	defer os.Remove(filePath)
-	defer os.Remove(filePath + indexFileExt)
 	defer os.Remove(newFilePath)
-	defer os.Remove(newFilePath + indexFileExt)
 
 	db, err := Open(filePath)
 	assert.NoError(t, err)
@@ -287,9 +280,7 @@ func TestBackupWithDeletedRecords(t *testing.T) {
 	const newFilePath = "TestBackupWithDeletedRecords2.zkv"
 	const recordCount = 100
 	defer os.Remove(filePath)
-	defer os.Remove(filePath + indexFileExt)
 	defer os.Remove(newFilePath)
-	defer os.Remove(newFilePath + indexFileExt)
 
 	db, err := Open(filePath)
 	assert.NoError(t, err)
@@ -344,7 +335,7 @@ func TestIndexFileBasic(t *testing.T) {
 	defer os.Remove(filePath)
 	defer os.Remove(filePath + indexFileExt)
 
-	db, err := Open(filePath)
+	db, err := OpenWithOptions(filePath, Options{UseIndexFile: true})
 	assert.NoError(t, err)
 
 	for i := 1; i <= recordCount; i++ {
@@ -367,7 +358,7 @@ func TestIndexFileBasic(t *testing.T) {
 	assert.NoError(t, err)
 
 	// try to read
-	db, err = Open(filePath)
+	db, err = OpenWithOptions(filePath, Options{UseIndexFile: true})
 	assert.NoError(t, err)
 
 	assert.Len(t, db.dataOffset, recordCount)
```