opt: reduce ShardsInfo memory usage with bitmap and sorted slice (#7974)
* opt: reduce ShardsInfo memory usage with bitmap and sorted slice

  - Replace map[ShardId]*ShardInfo with a sorted []ShardInfo slice
  - Add ShardBits (uint32) bitmap for O(1) existence checks
  - Use binary search for O(log n) lookups by shard ID
  - Maintain sorted order for efficient iteration
  - Add comprehensive unit tests and benchmarks

  Memory savings:
  - Map overhead: ~48 bytes per entry eliminated
  - Pointers: 8 bytes per entry eliminated
  - Total: ~56 bytes per shard saved

  Performance improvements:
  - Has(): O(1) using bitmap
  - Size(): O(log n) using binary search (was O(1), acceptable tradeoff)
  - Count(): O(1) using popcount on bitmap
  - Iteration: faster due to cache locality

* refactor: add methods to ShardBits type

  - Add Has(), Set(), Clear(), and Count() methods to ShardBits
  - Simplify ShardsInfo methods by using the ShardBits methods
  - Improves code readability and encapsulation

* opt: use ShardBits directly in ShardsCountFromVolumeEcShardInformationMessage

  Avoid creating a full ShardsInfo object just to count shards. Directly cast vi.EcIndexBits to ShardBits and use its Count() method.

* opt: use strings.Builder in ShardsInfo.String() for efficiency

* refactor: change AsSlice to return []ShardInfo (values instead of pointers)

  This completes the memory optimization by avoiding unnecessary pointer slices and potential allocations.

* refactor: rename ShardsCountFromVolumeEcShardInformationMessage to GetShardCount

* fix: prevent deadlock in Add and Subtract methods

  Copy the shards data from 'other' before releasing its lock, to avoid a potential deadlock when a.Add(b) and b.Add(a) are called concurrently. The previous implementation held other's lock while calling si.Set/Delete, which acquires si's lock. This could deadlock if two goroutines tried to add/subtract each other concurrently.

* opt: avoid unnecessary locking in constructor functions

  ShardsInfoFromVolume and ShardsInfoFromVolumeEcShardInformationMessage now build the shards slice and bitmap directly without calling Set(), which acquires a lock on every call. Since the object is local and not yet shared, locking is unnecessary and adds overhead. This improves performance during object construction.

* fix: rename 'copy' variable to avoid shadowing built-in function

  The variable name 'copy' in TestShardsInfo_Copy shadowed the built-in copy() function, which is confusing and bad practice. Renamed to 'siCopy'.

* opt: use math/bits.OnesCount32 and reorganize types

  1. Replace the manual popcount loop with math/bits.OnesCount32 for better performance and idiomatic Go code
  2. Move the ShardSize type definition to ec_shards_info.go for better code organization, since it's primarily used there

* refactor: Set() now accepts ShardInfo for future extensibility

  Changed Set(id ShardId, size ShardSize) to Set(shard ShardInfo) to support future additions to ShardInfo without changing the API. This makes the code more extensible, as new fields can be added to ShardInfo (e.g., checksum, location, etc.) without breaking the Set API.

* refactor: move ShardInfo and ShardSize to separate file

  Created ec_shard_info.go to hold the basic shard types (ShardInfo and ShardSize) for better code organization and separation of concerns.

* refactor: add ShardInfo constructor and helper functions

  Added a NewShardInfo() constructor and IsValid() method to better encapsulate ShardInfo creation and validation. Updated code to use the constructor for cleaner, more maintainable code.

* fix: update remaining Set() calls to use NewShardInfo constructor

  Fixed compilation errors in the storage and shell packages where Set() calls were not updated to use the new NewShardInfo() constructor.

* fix: remove unreachable code in filer backup commands

  Removed unreachable return statements after infinite loops in filer_backup.go and filer_meta_backup.go to fix compilation errors.

* fix: rename 'new' variable to avoid shadowing built-in

  Renamed 'new' to 'result' in the MinusParityShards, Plus, and Minus methods to avoid shadowing Go's built-in new() function.

* fix: update remaining test files to use NewShardInfo constructor

  Fixed Set() calls in command_volume_list_test.go and ec_rebalance_slots_test.go to use the NewShardInfo() constructor.
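For orientation, here is a minimal sketch of the layout change that the memory numbers above refer to. This is not code from the commit: the package name and the mapBacked/sliceBacked wrapper names are invented, and the old struct shape and the uint8 underlying type of ShardId are assumptions taken from the commit message.

package layoutsketch

// Stand-ins for the erasure_coding package's ShardId, ShardSize, and ShardInfo types.
type ShardId uint8
type ShardSize int64

type ShardInfo struct {
	Id   ShardId
	Size ShardSize
}

// mapBacked approximates the previous representation: every shard pays map
// bucket overhead (~48 bytes per entry) plus an 8-byte pointer to a
// separately heap-allocated ShardInfo, on top of the shard payload itself.
type mapBacked struct {
	shards map[ShardId]*ShardInfo
}

// sliceBacked approximates the new representation: ShardInfo values stored
// inline in a slice kept sorted by Id, plus a uint32 bitmap so existence
// checks are a single bit test and counting is a popcount.
type sliceBacked struct {
	shards    []ShardInfo // sorted by Id; lookups via binary search
	shardBits uint32      // bit i set <=> shard i is present
}

The commit message's ~56 bytes per shard is the ~48 bytes of map overhead plus the 8-byte pointer that disappear in the slice form.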
17 changed files with 786 additions and 300 deletions (lines changed per file):

  1  weed/command/filer_backup.go
  1  weed/command/filer_meta_backup.go
  2  weed/shell/command_cluster_status.go
 10  weed/shell/command_ec_common.go
  2  weed/shell/command_ec_rebuild.go
  4  weed/shell/command_volume_balance.go
  2  weed/shell/command_volume_list_test.go
  8  weed/shell/ec_rebalance_slots_test.go
 23  weed/storage/erasure_coding/ec_shard_info.go
358  weed/storage/erasure_coding/ec_shards_info.go
366  weed/storage/erasure_coding/ec_shards_info_test.go
  2  weed/storage/erasure_coding/ec_volume.go
255  weed/storage/erasure_coding/ec_volume_info.go
 40  weed/storage/erasure_coding/ec_volume_info_test.go
  6  weed/storage/store.go
  4  weed/storage/store_ec.go
  2  weed/worker/tasks/erasure_coding/detection.go
weed/storage/erasure_coding/ec_shard_info.go (new file, 23 lines):

package erasure_coding

// ShardSize represents the size of a shard in bytes
type ShardSize int64

// ShardInfo holds information about a single shard
type ShardInfo struct {
	Id   ShardId
	Size ShardSize
}

// NewShardInfo creates a new ShardInfo with the given ID and size
func NewShardInfo(id ShardId, size ShardSize) ShardInfo {
	return ShardInfo{
		Id:   id,
		Size: size,
	}
}

// IsValid checks if the shard info has a valid ID
func (si ShardInfo) IsValid() bool {
	return si.Id < MaxShardCount
}
weed/storage/erasure_coding/ec_shards_info.go (new file, 358 lines):

package erasure_coding

import (
	"fmt"
	"math/bits"
	"sort"
	"strings"
	"sync"

	"github.com/dustin/go-humanize"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

// ShardBits is a bitmap representing which shards are present (bit 0 = shard 0, etc.)
type ShardBits uint32

// Has checks if a shard ID is present in the bitmap
func (sb ShardBits) Has(id ShardId) bool {
	return id < MaxShardCount && sb&(1<<id) != 0
}

// Set sets a shard ID in the bitmap
func (sb ShardBits) Set(id ShardId) ShardBits {
	if id >= MaxShardCount {
		return sb
	}
	return sb | (1 << id)
}

// Clear clears a shard ID from the bitmap
func (sb ShardBits) Clear(id ShardId) ShardBits {
	if id >= MaxShardCount {
		return sb
	}
	return sb &^ (1 << id)
}

// Count returns the number of set bits using popcount
func (sb ShardBits) Count() int {
	return bits.OnesCount32(uint32(sb))
}

// ShardsInfo encapsulates information for EC shards with memory-efficient storage
type ShardsInfo struct {
	mu        sync.RWMutex
	shards    []ShardInfo // sorted by Id
	shardBits ShardBits
}

func NewShardsInfo() *ShardsInfo {
	return &ShardsInfo{
		shards: make([]ShardInfo, 0, TotalShardsCount),
	}
}

// ShardsInfoFromVolume initializes a ShardsInfo from an EcVolume.
func ShardsInfoFromVolume(ev *EcVolume) *ShardsInfo {
	res := &ShardsInfo{
		shards: make([]ShardInfo, len(ev.Shards)),
	}
	// Build shards directly to avoid locking in Set(), since res is not yet shared
	for i, s := range ev.Shards {
		res.shards[i] = NewShardInfo(s.ShardId, ShardSize(s.Size()))
		res.shardBits = res.shardBits.Set(s.ShardId)
	}
	return res
}

// ShardsInfoFromVolumeEcShardInformationMessage initializes a ShardsInfo from a VolumeEcShardInformationMessage proto.
func ShardsInfoFromVolumeEcShardInformationMessage(vi *master_pb.VolumeEcShardInformationMessage) *ShardsInfo {
	res := NewShardsInfo()
	if vi == nil {
		return res
	}

	var id ShardId
	var j int
	// Build shards directly to avoid locking in Set(), since res is not yet shared
	newShards := make([]ShardInfo, 0, 8)
	for bitmap := vi.EcIndexBits; bitmap != 0; bitmap >>= 1 {
		if bitmap&1 != 0 {
			var size ShardSize
			if j < len(vi.ShardSizes) {
				size = ShardSize(vi.ShardSizes[j])
			}
			j++
			newShards = append(newShards, NewShardInfo(id, size))
		}
		id++
	}
	res.shards = newShards
	res.shardBits = ShardBits(vi.EcIndexBits)

	return res
}

// GetShardCount returns the number of shards in a VolumeEcShardInformationMessage proto.
func GetShardCount(vi *master_pb.VolumeEcShardInformationMessage) int {
	if vi == nil {
		return 0
	}
	return ShardBits(vi.EcIndexBits).Count()
}

// String returns a string representation of a ShardsInfo.
func (sp *ShardsInfo) String() string {
	sp.mu.RLock()
	defer sp.mu.RUnlock()
	var sb strings.Builder
	for i, s := range sp.shards {
		if i > 0 {
			sb.WriteString(" ")
		}
		fmt.Fprintf(&sb, "%d:%s", s.Id, humanize.Bytes(uint64(s.Size)))
	}
	return sb.String()
}

// AsSlice converts a ShardsInfo to a slice of ShardInfo structs, ordered by shard ID.
func (si *ShardsInfo) AsSlice() []ShardInfo {
	si.mu.RLock()
	defer si.mu.RUnlock()
	res := make([]ShardInfo, len(si.shards))
	copy(res, si.shards)
	return res
}

// Count returns the number of EC shards using popcount on the bitmap.
func (si *ShardsInfo) Count() int {
	si.mu.RLock()
	defer si.mu.RUnlock()
	return si.shardBits.Count()
}

// Has verifies if a shard ID is present using a bitmap check.
func (si *ShardsInfo) Has(id ShardId) bool {
	si.mu.RLock()
	defer si.mu.RUnlock()
	return si.shardBits.Has(id)
}

// Ids returns a list of shard IDs, in ascending order.
func (si *ShardsInfo) Ids() []ShardId {
	si.mu.RLock()
	defer si.mu.RUnlock()
	ids := make([]ShardId, len(si.shards))
	for i, s := range si.shards {
		ids[i] = s.Id
	}
	return ids
}

// IdsInt returns a list of shard IDs as int, in ascending order.
func (si *ShardsInfo) IdsInt() []int {
	ids := si.Ids()
	res := make([]int, len(ids))
	for i, id := range ids {
		res[i] = int(id)
	}
	return res
}

// IdsUint32 returns a list of shard IDs as uint32, in ascending order.
func (si *ShardsInfo) IdsUint32() []uint32 {
	return ShardIdsToUint32(si.Ids())
}

// Set sets or updates a shard's information.
func (si *ShardsInfo) Set(shard ShardInfo) {
	if shard.Id >= MaxShardCount {
		return
	}
	si.mu.Lock()
	defer si.mu.Unlock()

	// Check if the shard already exists
	if si.shardBits.Has(shard.Id) {
		// Find and update
		idx := si.findIndex(shard.Id)
		if idx >= 0 {
			si.shards[idx] = shard
		}
		return
	}

	// Add new shard
	si.shardBits = si.shardBits.Set(shard.Id)

	// Find insertion point to keep the slice sorted
	idx := sort.Search(len(si.shards), func(i int) bool {
		return si.shards[i].Id > shard.Id
	})

	// Insert at idx
	si.shards = append(si.shards, ShardInfo{})
	copy(si.shards[idx+1:], si.shards[idx:])
	si.shards[idx] = shard
}

// Delete deletes a shard by ID.
func (si *ShardsInfo) Delete(id ShardId) {
	if id >= MaxShardCount {
		return
	}
	si.mu.Lock()
	defer si.mu.Unlock()

	if !si.shardBits.Has(id) {
		return // not present
	}

	si.shardBits = si.shardBits.Clear(id)

	// Find and remove from slice
	idx := si.findIndex(id)
	if idx >= 0 {
		si.shards = append(si.shards[:idx], si.shards[idx+1:]...)
	}
}

// Bitmap returns a bitmap for all existing shard IDs.
func (si *ShardsInfo) Bitmap() uint32 {
	si.mu.RLock()
	defer si.mu.RUnlock()
	return uint32(si.shardBits)
}

// Size returns the size of a given shard ID, if present.
func (si *ShardsInfo) Size(id ShardId) ShardSize {
	if id >= MaxShardCount {
		return 0
	}
	si.mu.RLock()
	defer si.mu.RUnlock()

	if !si.shardBits.Has(id) {
		return 0
	}

	idx := si.findIndex(id)
	if idx >= 0 {
		return si.shards[idx].Size
	}
	return 0
}

// TotalSize returns the size of all shards combined.
func (si *ShardsInfo) TotalSize() ShardSize {
	si.mu.RLock()
	defer si.mu.RUnlock()
	var total ShardSize
	for _, s := range si.shards {
		total += s.Size
	}
	return total
}

// Sizes returns a compact slice of present shard sizes, from first to last.
func (si *ShardsInfo) Sizes() []ShardSize {
	si.mu.RLock()
	defer si.mu.RUnlock()

	res := make([]ShardSize, len(si.shards))
	for i, s := range si.shards {
		res[i] = s.Size
	}
	return res
}

// SizesInt64 returns a compact slice of present shard sizes, from first to last, as int64.
func (si *ShardsInfo) SizesInt64() []int64 {
	sizes := si.Sizes()
	res := make([]int64, len(sizes))
	for i, s := range sizes {
		res[i] = int64(s)
	}
	return res
}

// Copy creates a copy of a ShardsInfo.
func (si *ShardsInfo) Copy() *ShardsInfo {
	si.mu.RLock()
	defer si.mu.RUnlock()

	newShards := make([]ShardInfo, len(si.shards))
	copy(newShards, si.shards)

	return &ShardsInfo{
		shards:    newShards,
		shardBits: si.shardBits,
	}
}

// DeleteParityShards removes parity shards from a ShardsInfo.
func (si *ShardsInfo) DeleteParityShards() {
	for id := DataShardsCount; id < TotalShardsCount; id++ {
		si.Delete(ShardId(id))
	}
}

// MinusParityShards creates a ShardsInfo copy, but with parity shards removed.
func (si *ShardsInfo) MinusParityShards() *ShardsInfo {
	result := si.Copy()
	result.DeleteParityShards()
	return result
}

// Add merges all shards from another ShardsInfo into this one.
func (si *ShardsInfo) Add(other *ShardsInfo) {
	other.mu.RLock()
	// Copy shards to avoid holding the lock on 'other' while calling si.Set, which could deadlock.
	shardsToAdd := make([]ShardInfo, len(other.shards))
	copy(shardsToAdd, other.shards)
	other.mu.RUnlock()

	for _, s := range shardsToAdd {
		si.Set(s)
	}
}

// Subtract removes all shards present on another ShardsInfo.
func (si *ShardsInfo) Subtract(other *ShardsInfo) {
	other.mu.RLock()
	// Copy shards to avoid holding the lock on 'other' while calling si.Delete, which could deadlock.
	shardsToRemove := make([]ShardInfo, len(other.shards))
	copy(shardsToRemove, other.shards)
	other.mu.RUnlock()

	for _, s := range shardsToRemove {
		si.Delete(s.Id)
	}
}

// Plus returns a new ShardsInfo consisting of (this + other).
func (si *ShardsInfo) Plus(other *ShardsInfo) *ShardsInfo {
	result := si.Copy()
	result.Add(other)
	return result
}

// Minus returns a new ShardsInfo consisting of (this - other).
func (si *ShardsInfo) Minus(other *ShardsInfo) *ShardsInfo {
	result := si.Copy()
	result.Subtract(other)
	return result
}

// findIndex finds the index of a shard by ID using binary search.
// Must be called with the lock held. Returns -1 if not found.
func (si *ShardsInfo) findIndex(id ShardId) int {
	idx := sort.Search(len(si.shards), func(i int) bool {
		return si.shards[i].Id >= id
	})
	if idx < len(si.shards) && si.shards[idx].Id == id {
		return idx
	}
	return -1
}
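The copy-then-release pattern in Add and Subtract above is what the deadlock fix in the commit message refers to. A minimal sketch of the scenario it guards against follows; mergeBothWays is a hypothetical helper written for illustration, not part of this change.

package erasure_coding

import "sync"

// mergeBothWays merges two ShardsInfo objects into each other concurrently.
// With the earlier implementation, a.Add(b) held b's read lock while waiting
// for a's write lock inside Set, and b.Add(a) held a's read lock while
// waiting for b's write lock: a circular wait. Because Add now copies the
// other side's shards and releases that lock before calling Set, this
// completes without deadlock.
func mergeBothWays(a, b *ShardsInfo) {
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); a.Add(b) }()
	go func() { defer wg.Done(); b.Add(a) }()
	wg.Wait()
}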
weed/storage/erasure_coding/ec_shards_info_test.go (new file, 366 lines):

package erasure_coding

import (
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

func TestShardsInfo_SetAndGet(t *testing.T) {
	si := NewShardsInfo()

	// Test setting shards
	si.Set(ShardInfo{Id: 0, Size: 1000})
	si.Set(ShardInfo{Id: 5, Size: 2000})
	si.Set(ShardInfo{Id: 13, Size: 3000})

	// Verify Has
	if !si.Has(0) {
		t.Error("Expected shard 0 to exist")
	}
	if !si.Has(5) {
		t.Error("Expected shard 5 to exist")
	}
	if !si.Has(13) {
		t.Error("Expected shard 13 to exist")
	}
	if si.Has(1) {
		t.Error("Expected shard 1 to not exist")
	}

	// Verify Size
	if got := si.Size(0); got != 1000 {
		t.Errorf("Expected size 1000, got %d", got)
	}
	if got := si.Size(5); got != 2000 {
		t.Errorf("Expected size 2000, got %d", got)
	}
	if got := si.Size(13); got != 3000 {
		t.Errorf("Expected size 3000, got %d", got)
	}

	// Verify Count
	if got := si.Count(); got != 3 {
		t.Errorf("Expected count 3, got %d", got)
	}

	// Verify Bitmap
	expectedBitmap := uint32((1 << 0) | (1 << 5) | (1 << 13))
	if got := si.Bitmap(); got != expectedBitmap {
		t.Errorf("Expected bitmap %b, got %b", expectedBitmap, got)
	}
}

func TestShardsInfo_SortedOrder(t *testing.T) {
	si := NewShardsInfo()

	// Add shards in non-sequential order
	si.Set(ShardInfo{Id: 10, Size: 1000})
	si.Set(ShardInfo{Id: 2, Size: 2000})
	si.Set(ShardInfo{Id: 7, Size: 3000})
	si.Set(ShardInfo{Id: 0, Size: 4000})

	// Verify Ids returns sorted order
	ids := si.Ids()
	expected := []ShardId{0, 2, 7, 10}
	if len(ids) != len(expected) {
		t.Fatalf("Expected %d ids, got %d", len(expected), len(ids))
	}
	for i, id := range ids {
		if id != expected[i] {
			t.Errorf("Expected id[%d]=%d, got %d", i, expected[i], id)
		}
	}
}

func TestShardsInfo_Delete(t *testing.T) {
	si := NewShardsInfo()

	si.Set(ShardInfo{Id: 0, Size: 1000})
	si.Set(ShardInfo{Id: 5, Size: 2000})
	si.Set(ShardInfo{Id: 10, Size: 3000})

	// Delete middle shard
	si.Delete(5)

	if si.Has(5) {
		t.Error("Expected shard 5 to be deleted")
	}
	if !si.Has(0) || !si.Has(10) {
		t.Error("Expected other shards to remain")
	}
	if got := si.Count(); got != 2 {
		t.Errorf("Expected count 2, got %d", got)
	}

	// Verify slice is still sorted
	ids := si.Ids()
	if len(ids) != 2 || ids[0] != 0 || ids[1] != 10 {
		t.Errorf("Expected ids [0, 10], got %v", ids)
	}
}

func TestShardsInfo_Update(t *testing.T) {
	si := NewShardsInfo()

	si.Set(ShardInfo{Id: 5, Size: 1000})

	// Update existing shard
	si.Set(ShardInfo{Id: 5, Size: 2000})

	if got := si.Size(5); got != 2000 {
		t.Errorf("Expected updated size 2000, got %d", got)
	}
	if got := si.Count(); got != 1 {
		t.Errorf("Expected count to remain 1, got %d", got)
	}
}

func TestShardsInfo_TotalSize(t *testing.T) {
	si := NewShardsInfo()

	si.Set(ShardInfo{Id: 0, Size: 1000})
	si.Set(ShardInfo{Id: 5, Size: 2000})
	si.Set(ShardInfo{Id: 10, Size: 3000})

	expected := ShardSize(6000)
	if got := si.TotalSize(); got != expected {
		t.Errorf("Expected total size %d, got %d", expected, got)
	}
}

func TestShardsInfo_Sizes(t *testing.T) {
	si := NewShardsInfo()

	si.Set(ShardInfo{Id: 2, Size: 100})
	si.Set(ShardInfo{Id: 5, Size: 200})
	si.Set(ShardInfo{Id: 8, Size: 300})

	sizes := si.Sizes()
	expected := []ShardSize{100, 200, 300}

	if len(sizes) != len(expected) {
		t.Fatalf("Expected %d sizes, got %d", len(expected), len(sizes))
	}
	for i, size := range sizes {
		if size != expected[i] {
			t.Errorf("Expected size[%d]=%d, got %d", i, expected[i], size)
		}
	}
}

func TestShardsInfo_Copy(t *testing.T) {
	si := NewShardsInfo()
	si.Set(ShardInfo{Id: 0, Size: 1000})
	si.Set(ShardInfo{Id: 5, Size: 2000})

	siCopy := si.Copy()

	// Verify copy has the same data
	if !siCopy.Has(0) || !siCopy.Has(5) {
		t.Error("Copy should have same shards")
	}
	if siCopy.Size(0) != 1000 || siCopy.Size(5) != 2000 {
		t.Error("Copy should have same sizes")
	}

	// Modify original
	si.Set(ShardInfo{Id: 10, Size: 3000})

	// Verify copy is independent
	if siCopy.Has(10) {
		t.Error("Copy should be independent of original")
	}
}

func TestShardsInfo_AddSubtract(t *testing.T) {
	si1 := NewShardsInfo()
	si1.Set(ShardInfo{Id: 0, Size: 1000})
	si1.Set(ShardInfo{Id: 2, Size: 2000})

	si2 := NewShardsInfo()
	si2.Set(ShardInfo{Id: 2, Size: 9999}) // different size
	si2.Set(ShardInfo{Id: 5, Size: 3000})

	// Test Add
	si1.Add(si2)
	if !si1.Has(0) || !si1.Has(2) || !si1.Has(5) {
		t.Error("Add should merge shards")
	}
	if si1.Size(2) != 9999 {
		t.Error("Add should update existing shard size")
	}

	// Test Subtract
	si1.Subtract(si2)
	if si1.Has(2) || si1.Has(5) {
		t.Error("Subtract should remove shards")
	}
	if !si1.Has(0) {
		t.Error("Subtract should keep non-matching shards")
	}
}

func TestShardsInfo_PlusMinus(t *testing.T) {
	si1 := NewShardsInfo()
	si1.Set(ShardInfo{Id: 0, Size: 1000})
	si1.Set(ShardInfo{Id: 2, Size: 2000})

	si2 := NewShardsInfo()
	si2.Set(ShardInfo{Id: 2, Size: 2000})
	si2.Set(ShardInfo{Id: 5, Size: 3000})

	// Test Plus
	result := si1.Plus(si2)
	if !result.Has(0) || !result.Has(2) || !result.Has(5) {
		t.Error("Plus should merge into new instance")
	}
	if si1.Has(5) {
		t.Error("Plus should not modify original")
	}

	// Test Minus
	result = si1.Minus(si2)
	if !result.Has(0) || result.Has(2) {
		t.Error("Minus should subtract into new instance")
	}
	if !si1.Has(2) {
		t.Error("Minus should not modify original")
	}
}

func TestShardsInfo_DeleteParityShards(t *testing.T) {
	si := NewShardsInfo()

	// Add data shards (0-9)
	for i := 0; i < DataShardsCount; i++ {
		si.Set(ShardInfo{Id: ShardId(i), Size: ShardSize((i + 1) * 1000)})
	}

	// Add parity shards (10-13)
	for i := DataShardsCount; i < TotalShardsCount; i++ {
		si.Set(ShardInfo{Id: ShardId(i), Size: ShardSize((i + 1) * 1000)})
	}

	si.DeleteParityShards()

	// Verify only data shards remain
	for i := 0; i < DataShardsCount; i++ {
		if !si.Has(ShardId(i)) {
			t.Errorf("Expected data shard %d to remain", i)
		}
	}
	for i := DataShardsCount; i < TotalShardsCount; i++ {
		if si.Has(ShardId(i)) {
			t.Errorf("Expected parity shard %d to be deleted", i)
		}
	}
}

func TestShardsInfo_FromVolumeEcShardInformationMessage(t *testing.T) {
	tests := []struct {
		name      string
		msg       *master_pb.VolumeEcShardInformationMessage
		wantBits  uint32
		wantSizes []int64
	}{
		{
			name:      "nil message",
			msg:       nil,
			wantBits:  0,
			wantSizes: []int64{},
		},
		{
			name: "single shard",
			msg: &master_pb.VolumeEcShardInformationMessage{
				EcIndexBits: 1 << 5,
				ShardSizes:  []int64{12345},
			},
			wantBits:  1 << 5,
			wantSizes: []int64{12345},
		},
		{
			name: "multiple shards",
			msg: &master_pb.VolumeEcShardInformationMessage{
				EcIndexBits: (1 << 0) | (1 << 3) | (1 << 7),
				ShardSizes:  []int64{1000, 2000, 3000},
			},
			wantBits:  (1 << 0) | (1 << 3) | (1 << 7),
			wantSizes: []int64{1000, 2000, 3000},
		},
		{
			name: "missing sizes",
			msg: &master_pb.VolumeEcShardInformationMessage{
				EcIndexBits: (1 << 0) | (1 << 3),
				ShardSizes:  []int64{1000},
			},
			wantBits:  (1 << 0) | (1 << 3),
			wantSizes: []int64{1000, 0},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			si := ShardsInfoFromVolumeEcShardInformationMessage(tt.msg)

			if got := si.Bitmap(); got != tt.wantBits {
				t.Errorf("Bitmap() = %b, want %b", got, tt.wantBits)
			}

			if got := si.SizesInt64(); len(got) != len(tt.wantSizes) {
				t.Errorf("SizesInt64() length = %d, want %d", len(got), len(tt.wantSizes))
			} else {
				for i, size := range got {
					if size != tt.wantSizes[i] {
						t.Errorf("SizesInt64()[%d] = %d, want %d", i, size, tt.wantSizes[i])
					}
				}
			}
		})
	}
}

func TestShardsInfo_String(t *testing.T) {
	si := NewShardsInfo()
	si.Set(ShardInfo{Id: 0, Size: 1024})
	si.Set(ShardInfo{Id: 5, Size: 2048})

	str := si.String()
	if str == "" {
		t.Error("String() should not be empty")
	}
	// Basic validation - should contain shard IDs
	if len(str) < 3 {
		t.Errorf("String() too short: %s", str)
	}
}

func BenchmarkShardsInfo_Set(b *testing.B) {
	si := NewShardsInfo()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		si.Set(ShardInfo{Id: ShardId(i % TotalShardsCount), Size: ShardSize(i * 1000)})
	}
}

func BenchmarkShardsInfo_Has(b *testing.B) {
	si := NewShardsInfo()
	for i := 0; i < TotalShardsCount; i++ {
		si.Set(ShardInfo{Id: ShardId(i), Size: ShardSize(i * 1000)})
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		si.Has(ShardId(i % TotalShardsCount))
	}
}

func BenchmarkShardsInfo_Size(b *testing.B) {
	si := NewShardsInfo()
	for i := 0; i < TotalShardsCount; i++ {
		si.Set(ShardInfo{Id: ShardId(i), Size: ShardSize(i * 1000)})
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		si.Size(ShardId(i % TotalShardsCount))
	}
}