Chris Lu
5 years ago
42 changed files with 1052 additions and 396 deletions
  6    unmaintained/change_superblock/change_superblock.go
  6    unmaintained/fix_dat/fix_dat.go
  5    unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
  3    unmaintained/see_dat/see_dat.go
 10    weed/command/backup.go
  8    weed/command/export.go
  5    weed/command/fix.go
 11    weed/pb/volume_server.proto
470    weed/pb/volume_server_pb/volume_server.pb.go
  7    weed/server/master_grpc_server_volume.go
  4    weed/server/master_server_handlers_admin.go
 10    weed/server/volume_grpc_copy.go
 67    weed/server/volume_grpc_erasure_coding.go
  3    weed/server/volume_grpc_tail.go
  1    weed/shell/command_ec_common.go
263    weed/shell/command_ec_decode.go
  1    weed/shell/command_ec_rebuild.go
 15    weed/shell/command_volume_fix_replication.go
198    weed/storage/erasure_coding/ec_decoder.go
  2    weed/storage/erasure_coding/ec_encoder.go
  9    weed/storage/erasure_coding/ec_volume_info.go
  9    weed/storage/store.go
  6    weed/storage/store_ec.go
  2    weed/storage/super_block/replica_placement.go
  2    weed/storage/super_block/replica_placement_test.go
 69    weed/storage/super_block/super_block.go
 44    weed/storage/super_block/super_block_read.go.go
  4    weed/storage/super_block/super_block_test.go
  9    weed/storage/volume.go
 12    weed/storage/volume_backup.go
  7    weed/storage/volume_info.go
  5    weed/storage/volume_loading.go
  3    weed/storage/volume_read_write.go
103    weed/storage/volume_super_block.go
  7    weed/storage/volume_vacuum.go
  5    weed/storage/volume_vacuum_test.go
  4    weed/topology/collection.go
  4    weed/topology/topology.go
  5    weed/topology/topology_test.go
  4    weed/topology/volume_growth.go
  3    weed/topology/volume_growth_test.go
  5    weed/topology/volume_layout.go
weed/shell/command_ec_decode.go
@@ -0,0 +1,263 @@
package shell

import (
	"context"
	"flag"
	"fmt"
	"io"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

func init() {
	Commands = append(Commands, &commandEcDecode{})
}

type commandEcDecode struct {
}

func (c *commandEcDecode) Name() string {
	return "ec.decode"
}

func (c *commandEcDecode) Help() string {
	return `decode an erasure coded volume into a normal volume

	ec.decode [-collection=""] [-volumeId=<volume_id>]

`
}

func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
	collection := encodeCommand.String("collection", "", "the collection name")
	if err = encodeCommand.Parse(args); err != nil {
		return nil
	}

	ctx := context.Background()
	vid := needle.VolumeId(*volumeId)

	// collect topology information
	topologyInfo, err := collectTopologyInfoForEcDecode(ctx, commandEnv)
	if err != nil {
		return err
	}

	// volumeId is provided
	if vid != 0 {
		return doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid)
	}

	// apply to all volumes in the collection
	volumeIds := collectEcShardIds(topologyInfo, *collection)
	fmt.Printf("ec decode volumes: %v\n", volumeIds)
	for _, vid := range volumeIds {
		if err = doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid); err != nil {
			return err
		}
	}

	return nil
}

func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
	// find volume location
	nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)

	fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits)

	// collect ec shards to the server with most space
	targetNodeLocation, err := collectEcShards(ctx, commandEnv, nodeToEcIndexBits, collection, vid)
	if err != nil {
		return fmt.Errorf("collectEcShards for volume %d: %v", vid, err)
	}

	// generate a normal volume
	err = generateNormalVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation)
	if err != nil {
		return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// delete the previous ec shards
	err = mountVolumeAndDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
	if err != nil {
		return fmt.Errorf("delete ec shards for volume %d: %v", vid, err)
	}

	return nil
}

func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error {

	// mount volume
	if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{
			VolumeId: uint32(vid),
		})
		return mountErr
	}); err != nil {
		return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// unmount ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := unmountEcShards(ctx, grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err)
		}
	}
	// delete ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := sourceServerDeleteEcShards(ctx, grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err)
		}
	}

	return nil
}

func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error {

	fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer)

	err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, genErr := volumeServerClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
			VolumeId:   uint32(vid),
			Collection: collection,
		})
		return genErr
	})

	return err

}

func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) {

	maxShardCount := 0
	var existingEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		if ecIndexBits.ShardIdCount() > maxShardCount {
			maxShardCount = ecIndexBits.ShardIdCount()
			targetNodeLocation = loc
			existingEcIndexBits = ecIndexBits
		}
	}

	fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, targetNodeLocation, nodeToEcIndexBits)

	var copiedEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		if loc == targetNodeLocation {
			continue
		}

		needToCopyEcIndexBits := ecIndexBits.Minus(existingEcIndexBits)
		if needToCopyEcIndexBits.ShardIdCount() == 0 {
			continue
		}

		err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {

			fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)

			_, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
				VolumeId:       uint32(vid),
				Collection:     collection,
				ShardIds:       needToCopyEcIndexBits.ToUint32Slice(),
				CopyEcxFile:    false,
				CopyEcjFile:    true,
				SourceDataNode: loc,
			})
			if copyErr != nil {
				return fmt.Errorf("copy %d.%v %s => %s : %v", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr)
			}

			return nil
		})

		if err != nil {
			break
		}

		copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits)

	}

	nodeToEcIndexBits[targetNodeLocation] = existingEcIndexBits.Plus(copiedEcIndexBits)

	return targetNodeLocation, err

}

func collectTopologyInfoForEcDecode(ctx context.Context, commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) {

	var resp *master_pb.VolumeListResponse
	err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
		resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
		return err
	})
	if err != nil {
		return
	}

	return resp.TopologyInfo, nil

}

func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) {

	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		for _, v := range dn.EcShardInfos {
			if v.Collection == selectedCollection && v.Id == uint32(vid) {
				ecShardInfos = append(ecShardInfos, v)
			}
		}
	})

	return
}

func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {

	vidMap := make(map[uint32]bool)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		for _, v := range dn.EcShardInfos {
			if v.Collection == selectedCollection {
				vidMap[v.Id] = true
			}
		}
	})

	for vid := range vidMap {
		vids = append(vids, needle.VolumeId(vid))
	}

	return
}

func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[string]erasure_coding.ShardBits {

	nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		for _, v := range dn.EcShardInfos {
			if v.Id == uint32(vid) {
				nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits)
			}
		}
	})

	return nodeToEcIndexBits
}
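
The shard bookkeeping in collectEcShards above is plain set arithmetic on erasure_coding.ShardBits. Below is a minimal, self-contained sketch of that arithmetic; the shard layouts are hypothetical, and only the ShardBits methods already used above (Minus, Plus, ShardIds, ShardIdCount) are assumed. In weed shell the command itself is driven exactly as the Help text shows: ec.decode -volumeId=<volume_id> for one volume, or ec.decode -collection=<name> for every EC volume in a collection.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
)

func main() {
	// Hypothetical layout: the target node already holds shards 0-4,
	// another node holds shards 3-9 (bit i set means shard i is present).
	existing := erasure_coding.ShardBits(0x01F) // 0000011111 -> shards 0,1,2,3,4
	other := erasure_coding.ShardBits(0x3F8)    // 1111111000 -> shards 3,4,5,6,7,8,9

	// Shards still missing on the target, mirroring
	// needToCopyEcIndexBits := ecIndexBits.Minus(existingEcIndexBits).
	needToCopy := other.Minus(existing)
	fmt.Println(needToCopy.ShardIds(), needToCopy.ShardIdCount()) // [5 6 7 8 9] 5

	// After copying, the target holds the union, as in
	// nodeToEcIndexBits[targetNodeLocation] = existingEcIndexBits.Plus(copiedEcIndexBits).
	merged := existing.Plus(needToCopy)
	fmt.Println(merged.ShardIdCount()) // 10
}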
weed/storage/erasure_coding/ec_decoder.go
@@ -0,0 +1,198 @@
package erasure_coding

import (
	"fmt"
	"io"
	"os"

	"github.com/chrislusf/seaweedfs/weed/storage/backend"
	"github.com/chrislusf/seaweedfs/weed/storage/idx"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
)

// WriteIdxFileFromEcIndex writes the .idx file from the .ecx and .ecj files
func WriteIdxFileFromEcIndex(baseFileName string) (err error) {

	ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644)
	if openErr != nil {
		return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
	}
	defer ecxFile.Close()

	idxFile, openErr := os.OpenFile(baseFileName+".idx", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if openErr != nil {
		return fmt.Errorf("cannot open %s.idx: %v", baseFileName, openErr)
	}
	defer idxFile.Close()

	if _, err = io.Copy(idxFile, ecxFile); err != nil {
		return fmt.Errorf("cannot copy %s.ecx to %s.idx: %v", baseFileName, baseFileName, err)
	}

	err = iterateEcjFile(baseFileName, func(key types.NeedleId) error {

		bytes := needle_map.ToBytes(key, types.Offset{}, types.TombstoneFileSize)
		idxFile.Write(bytes)

		return nil
	})

	return err
}

// FindDatFileSize calculates the .dat file size from the max offset entry.
// There may be extra deletions after that entry,
// but they are deletions anyway.
func FindDatFileSize(baseFileName string) (datSize int64, err error) {

	version, err := readEcVolumeVersion(baseFileName)
	if err != nil {
		return 0, fmt.Errorf("read ec volume %s version: %v", baseFileName, err)
	}

	err = iterateEcxFile(baseFileName, func(key types.NeedleId, offset types.Offset, size uint32) error {

		if size == types.TombstoneFileSize {
			return nil
		}

		entryStopOffset := offset.ToAcutalOffset() + needle.GetActualSize(size, version)
		if datSize < entryStopOffset {
			datSize = entryStopOffset
		}

		return nil
	})

	return
}

func readEcVolumeVersion(baseFileName string) (version needle.Version, err error) {

	// find volume version
	datFile, err := os.OpenFile(baseFileName+".ec00", os.O_RDONLY, 0644)
	if err != nil {
		return 0, fmt.Errorf("open ec volume %s superblock: %v", baseFileName, err)
	}
	datBackend := backend.NewDiskFile(datFile)

	superBlock, err := super_block.ReadSuperBlock(datBackend)
	datBackend.Close()
	if err != nil {
		return 0, fmt.Errorf("read ec volume %s superblock: %v", baseFileName, err)
	}

	return superBlock.Version, nil

}

func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
	ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644)
	if openErr != nil {
		return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
	}
	defer ecxFile.Close()

	buf := make([]byte, types.NeedleMapEntrySize)
	for {
		n, err := ecxFile.Read(buf)
		if n != types.NeedleMapEntrySize {
			if err == io.EOF {
				return nil
			}
			return err
		}
		key, offset, size := idx.IdxFileEntry(buf)
		if processNeedleFn != nil {
			err = processNeedleFn(key, offset, size)
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			return nil
		}
	}

}

func iterateEcjFile(baseFileName string, processNeedleFn func(key types.NeedleId) error) error {
	ecjFile, openErr := os.OpenFile(baseFileName+".ecj", os.O_RDONLY, 0644)
	if openErr != nil {
		return fmt.Errorf("cannot open ec journal %s.ecj: %v", baseFileName, openErr)
	}
	defer ecjFile.Close()

	buf := make([]byte, types.NeedleIdSize)
	for {
		n, err := ecjFile.Read(buf)
		if n != types.NeedleIdSize {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if processNeedleFn != nil {
			err = processNeedleFn(types.BytesToNeedleId(buf))
		}
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
	}

}

// WriteDatFile generates the .dat file from the .ec00 ~ .ec09 files
func WriteDatFile(baseFileName string, datFileSize int64) error {

	datFile, openErr := os.OpenFile(baseFileName+".dat", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if openErr != nil {
		return fmt.Errorf("cannot write volume %s.dat: %v", baseFileName, openErr)
	}
	defer datFile.Close()

	inputFiles := make([]*os.File, DataShardsCount)

	for shardId := 0; shardId < DataShardsCount; shardId++ {
		shardFileName := baseFileName + ToExt(shardId)
		inputFiles[shardId], openErr = os.OpenFile(shardFileName, os.O_RDONLY, 0)
		if openErr != nil {
			return openErr
		}
		defer inputFiles[shardId].Close()
	}

	for datFileSize >= DataShardsCount*ErasureCodingLargeBlockSize {
		for shardId := 0; shardId < DataShardsCount; shardId++ {
			w, err := io.CopyN(datFile, inputFiles[shardId], ErasureCodingLargeBlockSize)
			if w != ErasureCodingLargeBlockSize {
				return fmt.Errorf("copy %s large block %d: %v", baseFileName, shardId, err)
			}
			datFileSize -= ErasureCodingLargeBlockSize
		}
	}

	for datFileSize > 0 {
		for shardId := 0; shardId < DataShardsCount; shardId++ {
			toRead := min(datFileSize, ErasureCodingSmallBlockSize)
			w, err := io.CopyN(datFile, inputFiles[shardId], toRead)
			if w != toRead {
				return fmt.Errorf("copy %s small block %d: %v", baseFileName, shardId, err)
			}
			datFileSize -= toRead
		}
	}

	return nil
}

func min(x, y int64) int64 {
	if x > y {
		return y
	}
	return x
}
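
For orientation, the three exported helpers above would be chained roughly as follows to turn a set of EC shard files back into a .dat/.idx pair. This is a hedged sketch: the base file name is made up, and the actual caller in this commit is presumably the VolumeEcShardsToVolume handler touched in weed/server/volume_grpc_erasure_coding.go.

package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
)

func main() {
	// Hypothetical base name shared by the .ec00~.ec13, .ecx and .ecj files.
	baseFileName := "/data/weed/collection1_123"

	// Rebuild the needle index: copy .ecx entries into .idx and append
	// tombstones for every key recorded in the .ecj deletion journal.
	if err := erasure_coding.WriteIdxFileFromEcIndex(baseFileName); err != nil {
		log.Fatalf("write idx: %v", err)
	}

	// Derive the original .dat size from the highest live index entry.
	datSize, err := erasure_coding.FindDatFileSize(baseFileName)
	if err != nil {
		log.Fatalf("find dat size: %v", err)
	}

	// Re-interleave the data shards .ec00~.ec09 back into a normal .dat file.
	if err := erasure_coding.WriteDatFile(baseFileName, datSize); err != nil {
		log.Fatalf("write dat: %v", err)
	}
}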
weed/storage/super_block/replica_placement.go
@@ -1,4 +1,4 @@
-package storage
+package super_block
 
 import (
 	"errors"
weed/storage/super_block/replica_placement_test.go
@@ -1,4 +1,4 @@
-package storage
+package super_block
 
 import (
 	"testing"
weed/storage/super_block/super_block.go
@@ -0,0 +1,69 @@
package super_block

import (
	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/util"
)

const (
	SuperBlockSize = 8
)

/*
* Super block currently has 8 bytes allocated for each volume.
* Byte 0: version, 1 or 2
* Byte 1: Replica Placement strategy, 000, 001, 002, 010, etc
* Byte 2 and byte 3: Time to live. See TTL for definition
* Byte 4 and byte 5: The number of times the volume has been compacted.
* Rest bytes: Reserved
 */
type SuperBlock struct {
	Version            needle.Version
	ReplicaPlacement   *ReplicaPlacement
	Ttl                *needle.TTL
	CompactionRevision uint16
	Extra              *master_pb.SuperBlockExtra
	ExtraSize          uint16
}

func (s *SuperBlock) BlockSize() int {
	switch s.Version {
	case needle.Version2, needle.Version3:
		return SuperBlockSize + int(s.ExtraSize)
	}
	return SuperBlockSize
}

func (s *SuperBlock) Bytes() []byte {
	header := make([]byte, SuperBlockSize)
	header[0] = byte(s.Version)
	header[1] = s.ReplicaPlacement.Byte()
	s.Ttl.ToBytes(header[2:4])
	util.Uint16toBytes(header[4:6], s.CompactionRevision)

	if s.Extra != nil {
		extraData, err := proto.Marshal(s.Extra)
		if err != nil {
			glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err)
		}
		extraSize := len(extraData)
		if extraSize > 256*256-2 {
			// reserve a couple of bits for future extension
			glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2)
		}
		s.ExtraSize = uint16(extraSize)
		util.Uint16toBytes(header[6:8], s.ExtraSize)

		header = append(header, extraData...)
	}

	return header
}

func (s *SuperBlock) Initialized() bool {
	return s.ReplicaPlacement != nil && s.Ttl != nil
}
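
A small sketch of the 8-byte layout documented above, built only from identifiers that appear in this commit (NewReplicaPlacementFromByte, needle.Version3, needle.LoadTTLFromBytes); the placement byte, TTL bytes and compaction revision are arbitrary example values.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
)

func main() {
	// Replica placement byte 1 decodes to the "001" strategy; the value is arbitrary here.
	rp, err := super_block.NewReplicaPlacementFromByte(byte(1))
	if err != nil {
		panic(err)
	}

	sb := super_block.SuperBlock{
		Version:            needle.Version3,
		ReplicaPlacement:   rp,
		Ttl:                needle.LoadTTLFromBytes([]byte{0, 0}), // no TTL
		CompactionRevision: 7,                                     // arbitrary
	}

	// Byte 0: version, byte 1: replica placement, bytes 2-3: TTL,
	// bytes 4-5: compaction revision, bytes 6-7: size of the optional Extra block.
	fmt.Printf("% x\n", sb.Bytes())
}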
weed/storage/super_block/super_block_read.go.go
@@ -0,0 +1,44 @@
package super_block

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/backend"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// ReadSuperBlock reads from the data file and loads it into the volume's super block
func ReadSuperBlock(datBackend backend.BackendStorageFile) (superBlock SuperBlock, err error) {

	header := make([]byte, SuperBlockSize)
	if _, e := datBackend.ReadAt(header, 0); e != nil {
		err = fmt.Errorf("cannot read volume %s super block: %v", datBackend.Name(), e)
		return
	}

	superBlock.Version = needle.Version(header[0])
	if superBlock.ReplicaPlacement, err = NewReplicaPlacementFromByte(header[1]); err != nil {
		err = fmt.Errorf("cannot read replica type: %s", err.Error())
		return
	}
	superBlock.Ttl = needle.LoadTTLFromBytes(header[2:4])
	superBlock.CompactionRevision = util.BytesToUint16(header[4:6])
	superBlock.ExtraSize = util.BytesToUint16(header[6:8])

	if superBlock.ExtraSize > 0 {
		// read the extra block that follows the fixed 8-byte header
		extraData := make([]byte, int(superBlock.ExtraSize))
		if _, e := datBackend.ReadAt(extraData, SuperBlockSize); e != nil {
			err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.Name(), e)
			return
		}
		superBlock.Extra = &master_pb.SuperBlockExtra{}
		err = proto.Unmarshal(extraData, superBlock.Extra)
		if err != nil {
			err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.Name(), err)
			return
		}
	}

	return
}