Chris Lu
5 years ago
7 changed files with 138 additions and 126 deletions
14   weed/command/export.go
10   weed/command/fix.go
53   weed/storage/needle_map/btree_map.go
112  weed/storage/needle_map/memdb.go
13   weed/storage/needle_map_memory.go
30   weed/storage/needle_map_metric_test.go
32   weed/storage/volume_vacuum.go
weed/storage/needle_map/btree_map.go (deleted)
@@ -1,53 +0,0 @@
package needle_map

import (
	. "github.com/chrislusf/seaweedfs/weed/storage/types"

	"github.com/google/btree"
)

//This map assumes mostly inserting increasing keys
type BtreeMap struct {
	tree *btree.BTree
}

func NewBtreeMap() *BtreeMap {
	return &BtreeMap{
		tree: btree.New(32),
	}
}

func (cm *BtreeMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) {
	found := cm.tree.ReplaceOrInsert(NeedleValue{key, offset, size})
	if found != nil {
		old := found.(NeedleValue)
		return old.Offset, old.Size
	}
	return
}

func (cm *BtreeMap) Delete(key NeedleId) (oldSize uint32) {
	found := cm.tree.Delete(NeedleValue{key, Offset{}, 0})
	if found != nil {
		old := found.(NeedleValue)
		return old.Size
	}
	return
}

func (cm *BtreeMap) Get(key NeedleId) (*NeedleValue, bool) {
	found := cm.tree.Get(NeedleValue{key, Offset{}, 0})
	if found != nil {
		old := found.(NeedleValue)
		return &old, true
	}
	return nil, false
}

// Visit visits all entries or stop if any error when visiting
func (cm *BtreeMap) AscendingVisit(visit func(NeedleValue) error) (ret error) {
	cm.tree.Ascend(func(item btree.Item) bool {
		needle := item.(NeedleValue)
		ret = visit(needle)
		return ret == nil
	})
	return ret
}
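For context, the removed BtreeMap leaned on the github.com/google/btree package: stored values must implement the btree.Item interface (a Less method), ReplaceOrInsert/Delete/Get return the previously stored item or nil, and Ascend stops as soon as its callback returns false, which is how AscendingVisit propagated the first visit error. The following standalone sketch is not part of the commit; the entry type and field names are made up purely to illustrate that pattern.

// Minimal sketch of the google/btree usage pattern the removed BtreeMap relied on.
// The "entry" type here is hypothetical, not from SeaweedFS.
package main

import (
	"fmt"

	"github.com/google/btree"
)

type entry struct {
	key  uint64
	size uint32
}

// Less orders entries by key; this is what lets Ascend visit them in key order.
func (e entry) Less(than btree.Item) bool {
	return e.key < than.(entry).key
}

func main() {
	t := btree.New(32) // degree 32, same as the removed NewBtreeMap

	// ReplaceOrInsert returns the item it replaced, or nil for a fresh insert.
	if old := t.ReplaceOrInsert(entry{key: 1, size: 10}); old != nil {
		fmt.Println("replaced:", old.(entry))
	}
	t.ReplaceOrInsert(entry{key: 2, size: 20})

	// Ascend stops as soon as the callback returns false.
	t.Ascend(func(item btree.Item) bool {
		fmt.Println(item.(entry))
		return true
	})
}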
weed/storage/needle_map/memdb.go (new file)
@@ -0,0 +1,112 @@
package needle_map

import (
	"fmt"
	"os"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage/idx"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
	"github.com/chrislusf/seaweedfs/weed/util"
)

//This map uses in memory level db
type MemDb struct {
	db *leveldb.DB
}

func NewMemDb() *MemDb {
	opts := &opt.Options{}

	var err error
	t := &MemDb{}
	if t.db, err = leveldb.Open(storage.NewMemStorage(), opts); err != nil {
		glog.V(0).Infof("MemDb fails to open: %v", err)
		return nil
	}

	return t
}

func (cm *MemDb) Set(key NeedleId, offset Offset, size uint32) error {

	bytes := ToBytes(key, offset, size)

	if err := cm.db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
		return fmt.Errorf("failed to write temp leveldb: %v", err)
	}
	return nil
}

func (cm *MemDb) Delete(key NeedleId) error {
	bytes := make([]byte, NeedleIdSize)
	NeedleIdToBytes(bytes, key)
	return cm.db.Delete(bytes, nil)
}

func (cm *MemDb) Get(key NeedleId) (*NeedleValue, bool) {
	bytes := make([]byte, NeedleIdSize)
	NeedleIdToBytes(bytes[0:NeedleIdSize], key)
	data, err := cm.db.Get(bytes, nil)
	if err != nil || len(data) != OffsetSize+SizeSize {
		return nil, false
	}
	offset := BytesToOffset(data[0:OffsetSize])
	size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])
	return &NeedleValue{Key: key, Offset: offset, Size: size}, true
}

// Visit visits all entries or stop if any error when visiting
func (cm *MemDb) AscendingVisit(visit func(NeedleValue) error) (ret error) {
	iter := cm.db.NewIterator(nil, nil)
	for iter.Next() {
		key := BytesToNeedleId(iter.Key())
		data := iter.Value()
		offset := BytesToOffset(data[0:OffsetSize])
		size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])

		needle := NeedleValue{Key: key, Offset: offset, Size: size}
		ret = visit(needle)
		if ret != nil {
			return
		}
	}
	iter.Release()
	ret = iter.Error()

	return
}

func (cm *MemDb) SaveToIdx(idxName string) (ret error) {
	idxFile, err := os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return
	}
	defer idxFile.Close()

	return cm.AscendingVisit(func(value NeedleValue) error {
		_, err := idxFile.Write(value.ToBytes())
		return err
	})
}

func (cm *MemDb) LoadFromIdx(idxName string) (ret error) {
	idxFile, err := os.OpenFile(idxName, os.O_RDONLY, 0644)
	if err != nil {
		return
	}
	defer idxFile.Close()

	return idx.WalkIndexFile(idxFile, func(key NeedleId, offset Offset, size uint32) error {
		if offset.IsZero() || size == TombstoneFileSize {
			return nil
		}
		return cm.Set(key, offset, size)
	})
}
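The new MemDb exposes the same Set/Get/Delete/AscendingVisit surface as the removed BtreeMap, plus SaveToIdx/LoadFromIdx for moving the mapping to and from an .idx file. The sketch below is not taken from the commit; it only exercises the API added above, roughly the pattern callers such as the vacuum path might follow, and the file path is made up.

// Illustrative use of the MemDb added in this commit (not from the commit itself).
package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
)

func main() {
	db := needle_map.NewMemDb()
	if db == nil {
		log.Fatal("failed to open in-memory needle map")
	}

	// Record a few needle entries: key -> (offset, size).
	for i := uint64(1); i <= 3; i++ {
		if err := db.Set(Uint64ToNeedleId(i), Uint32ToOffset(uint32(i*8)), 100); err != nil {
			log.Fatal(err)
		}
	}

	// Look one entry back up.
	if v, ok := db.Get(Uint64ToNeedleId(2)); ok {
		log.Printf("needle 2: offset=%v size=%d", v.Offset, v.Size)
	}

	// Persist the whole map as an index file, then reload it. The path is hypothetical.
	if err := db.SaveToIdx("/tmp/example.idx"); err != nil {
		log.Fatal(err)
	}
	if err := db.LoadFromIdx("/tmp/example.idx"); err != nil {
		log.Fatal(err)
	}
}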
weed/storage/needle_map_metric_test.go
@@ -1,30 +0,0 @@
package storage

import (
	"github.com/chrislusf/seaweedfs/weed/glog"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
	"io/ioutil"
	"math/rand"
	"testing"
)

func TestFastLoadingNeedleMapMetrics(t *testing.T) {

	idxFile, _ := ioutil.TempFile("", "tmp.idx")
	nm := NewBtreeNeedleMap(idxFile)

	for i := 0; i < 10000; i++ {
		nm.Put(Uint64ToNeedleId(uint64(i+1)), Uint32ToOffset(uint32(0)), uint32(1))
		if rand.Float32() < 0.2 {
			nm.Delete(Uint64ToNeedleId(uint64(rand.Int63n(int64(i))+1)), Uint32ToOffset(uint32(0)))
		}
	}

	mm, _ := newNeedleMapMetricFromIndexFile(idxFile)

	glog.V(0).Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount())
	glog.V(0).Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize())
	glog.V(0).Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize())
	glog.V(0).Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount())
	glog.V(0).Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey())
}
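The removed test only logs the expected and actual metric values instead of failing when they diverge. A hedged sketch, not part of the commit: the comparisons could be turned into assertions with a small helper like the hypothetical one below, which would live in the same _test.go file.

// Hypothetical helper (not from SeaweedFS) for asserting metric equality in a test.
package storage

import "testing"

func assertEqualUint64(t *testing.T, name string, expected, actual uint64) {
	t.Helper()
	if expected != actual {
		t.Errorf("%s expected %d actual %d", name, expected, actual)
	}
}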