
fs.configure: configurable volume growth

pull/1630/head
Chris Lu 4 years ago
commit dc304342b2
Changed files:
  1. other/java/client/src/main/proto/filer.proto (1 line changed)
  2. weed/filer/filer_conf.go (3 lines changed)
  3. weed/filer/filer_notify_append.go (2 lines changed)
  4. weed/operation/assign_file_id.go (39 lines changed)
  5. weed/pb/filer.proto (1 line changed)
  6. weed/pb/filer_pb/filer.pb.go (29 lines changed)
  7. weed/server/filer_server_handlers_write.go (13 lines changed)
  8. weed/shell/command_fs_configure.go (31 lines changed)

other/java/client/src/main/proto/filer.proto (1 line changed)

@@ -364,6 +364,7 @@ message FilerConf {
}
DiskType disk_type = 5;
bool fsync = 6;
+ uint32 volume_growth_count = 7;
}
repeated PathConf locations = 2;
}

weed/filer/filer_conf.go (3 lines changed)

@@ -122,6 +122,9 @@ func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
a.DiskType = b.DiskType
}
a.Fsync = b.Fsync || a.Fsync
+ if b.VolumeGrowthCount > 0 {
+ a.VolumeGrowthCount = b.VolumeGrowthCount
+ }
}
func (fc *FilerConf) ToProto() *filer_pb.FilerConf {
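
The merge rule above means a more specific path rule only overrides VolumeGrowthCount when it actually sets one. A quick illustration of that semantics, as a hypothetical test placed inside the weed/filer package (mergePathConf is unexported, so it can only be called from there):

package filer

import (
	"testing"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func TestVolumeGrowthCountMerge(t *testing.T) {
	merged := &filer_pb.FilerConf_PathConf{VolumeGrowthCount: 2}
	incoming := &filer_pb.FilerConf_PathConf{} // no explicit growth count
	mergePathConf(merged, incoming)
	if merged.VolumeGrowthCount != 2 {
		t.Fatalf("a zero incoming value should not override, got %d", merged.VolumeGrowthCount)
	}
}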

weed/filer/filer_notify_append.go (2 lines changed)

@@ -53,7 +53,7 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
Count: 1,
Collection: util.Nvl(f.metaLogCollection, rule.Collection),
Replication: util.Nvl(f.metaLogReplication, rule.Replication),
- WritableVolumeCount: 1,
+ WritableVolumeCount: rule.VolumeGrowthCount,
}
assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)

weed/operation/assign_file_id.go (39 lines changed)

@@ -104,12 +104,13 @@ func LookupJwt(master string, fileId string) security.EncodedJwt {
}
type StorageOption struct {
- Replication string
- Collection string
- DataCenter string
- Rack string
- TtlSeconds int32
- Fsync bool
+ Replication string
+ Collection string
+ DataCenter string
+ Rack string
+ TtlSeconds int32
+ Fsync bool
+ VolumeGrowthCount uint32
}
func (so *StorageOption) TtlString() string {
@@ -118,21 +119,23 @@ func (so *StorageOption) TtlString() string {
func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, altRequest *VolumeAssignRequest) {
ar = &VolumeAssignRequest{
- Count: uint64(count),
- Replication: so.Replication,
- Collection: so.Collection,
- Ttl: so.TtlString(),
- DataCenter: so.DataCenter,
- Rack: so.Rack,
+ Count: uint64(count),
+ Replication: so.Replication,
+ Collection: so.Collection,
+ Ttl: so.TtlString(),
+ DataCenter: so.DataCenter,
+ Rack: so.Rack,
+ WritableVolumeCount: so.VolumeGrowthCount,
}
if so.DataCenter != "" || so.Rack != "" {
altRequest = &VolumeAssignRequest{
- Count: uint64(count),
- Replication: so.Replication,
- Collection: so.Collection,
- Ttl: so.TtlString(),
- DataCenter: "",
- Rack: "",
+ Count: uint64(count),
+ Replication: so.Replication,
+ Collection: so.Collection,
+ Ttl: so.TtlString(),
+ DataCenter: "",
+ Rack: "",
+ WritableVolumeCount: so.VolumeGrowthCount,
}
}
return
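
A minimal sketch of how the new field travels to volume assignment (hypothetical values; DataCenter and Rack are left empty so only the primary request is built): StorageOption.VolumeGrowthCount becomes WritableVolumeCount on the VolumeAssignRequest sent to the master.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/operation"
)

func main() {
	so := &operation.StorageOption{
		Collection:        "example", // hypothetical collection name
		Replication:       "001",
		VolumeGrowthCount: 2, // grow two physical volumes when none are writable
	}
	primary, alt := so.ToAssignRequests(1)
	fmt.Println(primary.WritableVolumeCount) // 2
	fmt.Println(alt == nil)                  // true: no data center or rack preference
}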

weed/pb/filer.proto (1 line changed)

@@ -364,6 +364,7 @@ message FilerConf {
}
DiskType disk_type = 5;
bool fsync = 6;
+ uint32 volume_growth_count = 7;
}
repeated PathConf locations = 2;
}

weed/pb/filer_pb/filer.pb.go (29 lines changed)

@@ -3169,12 +3169,13 @@ type FilerConf_PathConf struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- LocationPrefix string `protobuf:"bytes,1,opt,name=location_prefix,json=locationPrefix,proto3" json:"location_prefix,omitempty"`
- Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
- Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
- Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
- DiskType FilerConf_PathConf_DiskType `protobuf:"varint,5,opt,name=disk_type,json=diskType,proto3,enum=filer_pb.FilerConf_PathConf_DiskType" json:"disk_type,omitempty"`
- Fsync bool `protobuf:"varint,6,opt,name=fsync,proto3" json:"fsync,omitempty"`
+ LocationPrefix string `protobuf:"bytes,1,opt,name=location_prefix,json=locationPrefix,proto3" json:"location_prefix,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+ Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ DiskType FilerConf_PathConf_DiskType `protobuf:"varint,5,opt,name=disk_type,json=diskType,proto3,enum=filer_pb.FilerConf_PathConf_DiskType" json:"disk_type,omitempty"`
+ Fsync bool `protobuf:"varint,6,opt,name=fsync,proto3" json:"fsync,omitempty"`
+ VolumeGrowthCount uint32 `protobuf:"varint,7,opt,name=volume_growth_count,json=volumeGrowthCount,proto3" json:"volume_growth_count,omitempty"`
}
func (x *FilerConf_PathConf) Reset() {
@@ -3251,6 +3252,13 @@ func (x *FilerConf_PathConf) GetFsync() bool {
return false
}
+ func (x *FilerConf_PathConf) GetVolumeGrowthCount() uint32 {
+ if x != nil {
+ return x.VolumeGrowthCount
+ }
+ return 0
+ }
var File_filer_proto protoreflect.FileDescriptor
var file_filer_proto_rawDesc = []byte{
@@ -3625,13 +3633,13 @@ var file_filer_proto_rawDesc = []byte{
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x22, 0x25, 0x0a, 0x0d, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xed, 0x02, 0x0a, 0x09, 0x46, 0x69,
+ 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x9d, 0x03, 0x0a, 0x09, 0x46, 0x69,
0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x12, 0x3a, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f,
- 0x6e, 0x66, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x89, 0x02,
+ 0x6e, 0x66, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xb9, 0x02,
0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65,
@@ -3646,7 +3654,10 @@ var file_filer_proto_rawDesc = []byte{
0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70,
0x65, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66,
0x73, 0x79, 0x6e, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x73, 0x79, 0x6e,
- 0x63, 0x22, 0x26, 0x0a, 0x08, 0x44, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a,
+ 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x77,
+ 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x22, 0x26, 0x0a, 0x08, 0x44, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a,
0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x44, 0x44, 0x10, 0x01,
0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x44, 0x10, 0x02, 0x32, 0xdc, 0x0c, 0x0a, 0x0c, 0x53, 0x65,
0x61, 0x77, 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f,

weed/server/filer_server_handlers_write.go (13 lines changed)

@@ -135,12 +135,13 @@ func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication
}
return &operation.StorageOption{
- Replication: util.Nvl(replication, rule.Replication),
- Collection: util.Nvl(collection, rule.Collection),
- DataCenter: util.Nvl(dataCenter, fs.option.DataCenter),
- Rack: util.Nvl(rack, fs.option.Rack),
- TtlSeconds: ttlSeconds,
- Fsync: fsync || rule.Fsync,
+ Replication: util.Nvl(replication, rule.Replication),
+ Collection: util.Nvl(collection, rule.Collection),
+ DataCenter: util.Nvl(dataCenter, fs.option.DataCenter),
+ Rack: util.Nvl(rack, fs.option.Rack),
+ TtlSeconds: ttlSeconds,
+ Fsync: fsync || rule.Fsync,
+ VolumeGrowthCount: rule.VolumeGrowthCount,
}
}

weed/shell/command_fs_configure.go (31 lines changed)

@@ -11,6 +11,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -35,6 +36,9 @@ func (c *commandFsConfigure) Help() string {
fs.configure -locationPrefix=/my/folder -collection=abc
fs.configure -locationPrefix=/my/folder -collection=abc -ttl=7d
+ # example: configure adding only 1 physical volume for each bucket collection
+ fs.configure -locationPrefix=/buckets/ -volumeGrowthCount=1
# apply the changes
fs.configure -locationPrefix=/my/folder -collection=abc -apply
@@ -52,6 +56,7 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
replication := fsConfigureCommand.String("replication", "", "assign writes with this replication")
ttl := fsConfigureCommand.String("ttl", "", "assign writes with this ttl")
fsync := fsConfigureCommand.Bool("fsync", false, "fsync for the writes")
+ volumeGrowthCount := fsConfigureCommand.Int("volumeGrowthCount", 0, "the number of physical volumes to add if no writable volumes")
isDelete := fsConfigureCommand.Bool("delete", false, "delete the configuration by locationPrefix")
apply := fsConfigureCommand.Bool("apply", false, "update and apply filer configuration")
if err = fsConfigureCommand.Parse(args); err != nil {
@@ -83,15 +88,31 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
if *locationPrefix != "" {
locConf := &filer_pb.FilerConf_PathConf{
- LocationPrefix: *locationPrefix,
- Collection: *collection,
- Replication: *replication,
- Ttl: *ttl,
- Fsync: *fsync,
+ LocationPrefix: *locationPrefix,
+ Collection: *collection,
+ Replication: *replication,
+ Ttl: *ttl,
+ Fsync: *fsync,
+ VolumeGrowthCount: uint32(*volumeGrowthCount),
}
+ // check collection
+ if *collection != "" && strings.HasPrefix(*locationPrefix, "/buckets/") {
+ return fmt.Errorf("one s3 bucket goes to one collection and is not customizable")
+ }
+ // check replication
+ if *replication != "" {
+ rp, err := super_block.NewReplicaPlacementFromString(*replication)
+ if err != nil {
+ return fmt.Errorf("parse replication %s: %v", *replication, err)
+ }
+ if *volumeGrowthCount%rp.GetCopyCount() != 0 {
+ return fmt.Errorf("volumeGrowthCount %d should be divisible by replication copy count %d", *volumeGrowthCount, rp.GetCopyCount())
+ }
+ }
// save it
if *isDelete {
fc.DeleteLocationConf(*locationPrefix)
} else {

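The divisibility check above ties the growth count to the replica placement. A hedged sketch, assuming replication "001" (one extra copy on another server in the same rack, so two copies per logical volume): the growth count has to be a multiple of the copy count so that every grown volume comes with a complete replica set.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
)

func main() {
	rp, err := super_block.NewReplicaPlacementFromString("001")
	if err != nil {
		panic(err)
	}
	copyCount := rp.GetCopyCount() // expected to be 2 for "001"
	for _, growth := range []int{1, 2, 4} {
		fmt.Printf("volumeGrowthCount=%d ok=%v\n", growth, growth%copyCount == 0)
	}
	// With two copies, 1 would be rejected by fs.configure while 2 and 4 pass.
}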