diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go
index 3049b6169..0e57d791b 100644
--- a/weed/storage/disk_location_ec.go
+++ b/weed/storage/disk_location_ec.go
@@ -45,13 +45,18 @@ func (l *DiskLocation) DestroyEcVolume(vid needle.VolumeId) {
 // unloadEcVolume removes an EC volume from memory without deleting its files on disk.
 // This is useful for distributed EC volumes where shards may be on other servers.
 func (l *DiskLocation) unloadEcVolume(vid needle.VolumeId) {
+	var toClose *erasure_coding.EcVolume
 	l.ecVolumesLock.Lock()
-	defer l.ecVolumesLock.Unlock()
-
 	if ecVolume, found := l.ecVolumes[vid]; found {
-		ecVolume.Close()
+		toClose = ecVolume
 		delete(l.ecVolumes, vid)
 	}
+	l.ecVolumesLock.Unlock()
+
+	// Close outside the lock to avoid holding write lock during I/O
+	if toClose != nil {
+		toClose.Close()
+	}
 }
 
 func (l *DiskLocation) CollectEcShards(vid needle.VolumeId, shardFileNames []string) (ecVolume *erasure_coding.EcVolume, found bool) {
@@ -431,13 +436,13 @@ func (l *DiskLocation) removeEcVolumeFiles(collection string, vid needle.VolumeI
 	indexBaseFileName := erasure_coding.EcShardFileName(collection, l.IdxDirectory, int(vid))
 
 	// Helper to remove a file with consistent error handling
-	removeFile := func(path, description string) {
-		if err := os.Remove(path); err != nil {
+	removeFile := func(filePath, description string) {
+		if err := os.Remove(filePath); err != nil {
 			if !os.IsNotExist(err) {
-				glog.Warningf("Failed to remove incomplete %s %s: %v", description, path, err)
+				glog.Warningf("Failed to remove incomplete %s %s: %v", description, filePath, err)
 			}
 		} else {
-			glog.V(2).Infof("Removed incomplete %s: %s", description, path)
+			glog.V(2).Infof("Removed incomplete %s: %s", description, filePath)
 		}
 	}
 
diff --git a/weed/storage/disk_location_ec_shard_size_test.go b/weed/storage/disk_location_ec_shard_size_test.go
index 061783736..e58c1c129 100644
--- a/weed/storage/disk_location_ec_shard_size_test.go
+++ b/weed/storage/disk_location_ec_shard_size_test.go
@@ -5,18 +5,101 @@ import (
 )
 
 func TestCalculateExpectedShardSize(t *testing.T) {
+	const (
+		largeBlock     = 1024 * 1024 * 1024      // 1GB
+		smallBlock     = 1024 * 1024             // 1MB
+		dataShards     = 10
+		largeBatchSize = largeBlock * dataShards // 10GB
+		smallBatchSize = smallBlock * dataShards // 10MB
+	)
+
 	tests := []struct {
 		name              string
 		datFileSize       int64
 		expectedShardSize int64
 		description       string
 	}{
+		// Edge case: empty file
+		{
+			name:              "0 bytes (empty file)",
+			datFileSize:       0,
+			expectedShardSize: 0,
+			description:       "Empty file has 0 shard size",
+		},
+
+		// Boundary tests: exact multiples of large block
 		{
 			name:              "Exact 10GB (1 large batch)",
-			datFileSize:       10 * 1024 * 1024 * 1024, // 10GB = 1 large batch
-			expectedShardSize: 1 * 1024 * 1024 * 1024,  // 1GB per shard
+			datFileSize:       largeBatchSize, // 10GB = 1 large batch
+			expectedShardSize: largeBlock,     // 1GB per shard
 			description:       "Exactly fits in large blocks",
 		},
+		{
+			name:              "Exact 20GB (2 large batches)",
+			datFileSize:       2 * largeBatchSize, // 20GB
+			expectedShardSize: 2 * largeBlock,     // 2GB per shard
+			description:       "2 complete large batches",
+		},
+		{
+			name:              "Just under large batch (10GB - 1 byte)",
+			datFileSize:       largeBatchSize - 1, // 10,737,418,239 bytes
+			expectedShardSize: 1024 * smallBlock,  // 1024MB = 1GB (needs 1024 small blocks)
+			description:       "Just under 10GB needs 1024 small blocks",
+		},
+		{
+			name:              "Just over large batch (10GB + 1 byte)",
+			datFileSize:       largeBatchSize + 1,      // 10GB + 1 byte
+			expectedShardSize: largeBlock + smallBlock, // 1GB + 1MB
+			description:       "Just over 10GB adds 1 small block",
+		},
+
+		// Boundary tests: exact multiples of small batch
+		{
+			name:              "Exact 10MB (1 small batch)",
+			datFileSize:       smallBatchSize, // 10MB
+			expectedShardSize: smallBlock,     // 1MB per shard
+			description:       "Exactly fits in 1 small batch",
+		},
+		{
+			name:              "Exact 20MB (2 small batches)",
+			datFileSize:       2 * smallBatchSize, // 20MB
+			expectedShardSize: 2 * smallBlock,     // 2MB per shard
+			description:       "2 complete small batches",
+		},
+		{
+			name:              "Just under small batch (10MB - 1 byte)",
+			datFileSize:       smallBatchSize - 1, // 10MB - 1 byte
+			expectedShardSize: smallBlock,         // Still needs 1MB per shard (rounds up)
+			description:       "Just under 10MB rounds up to 1 small block",
+		},
+		{
+			name:              "Just over small batch (10MB + 1 byte)",
+			datFileSize:       smallBatchSize + 1, // 10MB + 1 byte
+			expectedShardSize: 2 * smallBlock,     // 2MB per shard
+			description:       "Just over 10MB needs 2 small blocks",
+		},
+
+		// Mixed: large batch + partial small batch
+		{
+			name:              "10GB + 1MB",
+			datFileSize:       largeBatchSize + 1*1024*1024, // 10GB + 1MB
+			expectedShardSize: largeBlock + smallBlock,      // 1GB + 1MB
+			description:       "1 large batch + 1MB needs 1 small block",
+		},
+		{
+			name:              "10GB + 5MB",
+			datFileSize:       largeBatchSize + 5*1024*1024, // 10GB + 5MB
+			expectedShardSize: largeBlock + smallBlock,      // 1GB + 1MB
+			description:       "1 large batch + 5MB rounds up to 1 small block",
+		},
+		{
+			name:              "10GB + 15MB",
+			datFileSize:       largeBatchSize + 15*1024*1024, // 10GB + 15MB
+			expectedShardSize: largeBlock + 2*smallBlock,     // 1GB + 2MB
+			description:       "1 large batch + 15MB needs 2 small blocks",
+		},
+
+		// Original test cases
 		{
 			name:              "11GB (1 large batch + 103 small blocks)",
 			datFileSize:       11 * 1024 * 1024 * 1024, // 11GB
@@ -29,12 +112,6 @@ func TestCalculateExpectedShardSize(t *testing.T) {
 			expectedShardSize: 1 * 1024 * 1024, // 1MB per shard (rounded up)
 			description:       "Small file rounds up to 1MB per shard",
 		},
-		{
-			name:              "15MB (requires 2 small blocks per shard)",
-			datFileSize:       15 * 1024 * 1024, // 15MB
-			expectedShardSize: 2 * 1024 * 1024,  // 2MB per shard
-			description:       "15MB needs 2 small blocks per shard",
-		},
 		{
 			name:              "1KB (minimum size)",
 			datFileSize:       1024,