1
0
mirror of https://github.com/nxshock/zkv.git synced 2025-04-20 09:21:50 +05:00

Compare commits

...

4 Commits

Author SHA1 Message Date
fe90a55322 Add note about memory consumption 2022-12-09 20:28:24 +05:00
f093b7feed Remove seekable zstd streams note
Looks like this library does not support chunked zstd streams
2022-12-09 20:06:52 +05:00
23ee15dc23 Fix usage example 2022-12-09 20:05:57 +05:00
8dfc73af1d Speedup small writes by using write buffer 2022-12-09 20:05:30 +05:00
5 changed files with 24 additions and 12 deletions

View File

@ -10,7 +10,7 @@ Simple key-value store for single-user applications.
## Cons ## Cons
* Index stored in memory (`map[key hash (28 bytes)]file offset (int64)`) * Index stored in memory (`map[key hash (28 bytes)]file offset (int64)`) - average 200-250 Mb of RAM per 1M keys
* Need to read the whole file on store open to create file index * Need to read the whole file on store open to create file index
* No way to recover disk space from deleted records * No way to recover disk space from deleted records
* Write/Delete operations block Read and each other operations * Write/Delete operations block Read and each other operations
@ -20,7 +20,7 @@ Simple key-value store for single-user applications.
Create or open existing file: Create or open existing file:
```go ```go
db, err := Open("path to file") db, err := zkv.Open("path to file")
``` ```
Data operations: Data operations:
@ -66,6 +66,5 @@ File is log structured list of commands:
## TODO ## TODO
- [ ] Test [seekable zstd streams](https://github.com/SaveTheRbtz/zstd-seekable-format-go)
- [ ] Implement optional separate index file to speedup store initialization - [ ] Implement optional separate index file to speedup store initialization
- [ ] Add recovery previous state of store file on write error - [ ] Add recovery previous state of store file on write error

View File

@ -9,5 +9,6 @@ import (
var defaultOptions = Options{ var defaultOptions = Options{
MaxParallelReads: runtime.NumCPU(), MaxParallelReads: runtime.NumCPU(),
CompressionLevel: zstd.SpeedDefault, CompressionLevel: zstd.SpeedDefault,
BufferSize: 4 * 1024 * 1024, MemoryBufferSize: 4 * 1024 * 1024,
DiskBufferSize: 1 * 1024 * 1024,
} }

View File

@ -9,8 +9,11 @@ type Options struct {
// Compression level // Compression level
CompressionLevel zstd.EncoderLevel CompressionLevel zstd.EncoderLevel
// Write buffer size in bytes // Memory write buffer size in bytes
BufferSize int MemoryBufferSize int
// Disk write buffer size in bytes
DiskBufferSize int
} }
func (o *Options) setDefaults() { func (o *Options) setDefaults() {

17
zkv.go
View File

@ -1,6 +1,7 @@
package zkv package zkv
import ( import (
"bufio"
"bytes" "bytes"
"crypto/sha256" "crypto/sha256"
"encoding/base64" "encoding/base64"
@ -124,7 +125,7 @@ func (s *Store) Delete(key interface{}) error {
return err return err
} }
if s.buffer.Len() > s.options.BufferSize { if s.buffer.Len() > s.options.MemoryBufferSize {
err = s.flush() err = s.flush()
if err != nil { if err != nil {
@ -209,7 +210,7 @@ func (s *Store) setBytes(keyHash [sha256.Size224]byte, valueBytes []byte) error
return err return err
} }
if s.buffer.Len() > s.options.BufferSize { if s.buffer.Len() > s.options.MemoryBufferSize {
err = s.flush() err = s.flush()
if err != nil { if err != nil {
@ -238,7 +239,7 @@ func (s *Store) set(key, value interface{}) error {
return err return err
} }
if s.buffer.Len() > s.options.BufferSize { if s.buffer.Len() > s.options.MemoryBufferSize {
err = s.flush() err = s.flush()
if err != nil { if err != nil {
@ -377,7 +378,9 @@ func (s *Store) flush() error {
return fmt.Errorf("open store file: %v", err) return fmt.Errorf("open store file: %v", err)
} }
encoder, err := zstd.NewWriter(f, zstd.WithEncoderLevel(s.options.CompressionLevel)) diskWriteBuffer := bufio.NewWriterSize(f, s.options.DiskBufferSize)
encoder, err := zstd.NewWriter(diskWriteBuffer, zstd.WithEncoderLevel(s.options.CompressionLevel))
if err != nil { if err != nil {
f.Close() f.Close()
return fmt.Errorf("open store file: %v", err) return fmt.Errorf("open store file: %v", err)
@ -402,6 +405,12 @@ func (s *Store) flush() error {
return err return err
} }
err = diskWriteBuffer.Flush()
if err != nil {
// TODO: truncate file to previous state
return err
}
err = f.Close() err = f.Close()
if err != nil { if err != nil {
return err return err

View File

@ -165,7 +165,7 @@ func TestBufferBasic(t *testing.T) {
const filePath = "TestBuffer.zkv" const filePath = "TestBuffer.zkv"
defer os.Remove(filePath) defer os.Remove(filePath)
db, err := OpenWithOptions(filePath, Options{BufferSize: 100}) db, err := OpenWithOptions(filePath, Options{MemoryBufferSize: 100})
assert.NoError(t, err) assert.NoError(t, err)
err = db.Set(1, make([]byte, 100)) err = db.Set(1, make([]byte, 100))
@ -190,7 +190,7 @@ func TestBufferRead(t *testing.T) {
const recordCount = 100 const recordCount = 100
defer os.Remove(filePath) defer os.Remove(filePath)
db, err := OpenWithOptions(filePath, Options{BufferSize: 100}) db, err := OpenWithOptions(filePath, Options{MemoryBufferSize: 100})
assert.NoError(t, err) assert.NoError(t, err)
for i := 1; i <= recordCount; i++ { for i := 1; i <= recordCount; i++ {