diff --git a/weed/admin/dash/admin_server.go b/weed/admin/dash/admin_server.go index 2ecadaec4..2c129c671 100644 --- a/weed/admin/dash/admin_server.go +++ b/weed/admin/dash/admin_server.go @@ -11,6 +11,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" adminplugin "github.com/seaweedfs/seaweedfs/weed/admin/plugin" "github.com/seaweedfs/seaweedfs/weed/cluster" + clustermaintenance "github.com/seaweedfs/seaweedfs/weed/cluster/maintenance" "github.com/seaweedfs/seaweedfs/weed/credential" "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/pb" @@ -234,22 +235,19 @@ func NewAdminServer(masters string, templateFS http.FileSystem, dataDir string, }() } - plugin, err := adminplugin.New(adminplugin.Options{ + pluginOpts := adminplugin.Options{ DataDir: dataDir, ClusterContextProvider: func(_ context.Context) (*plugin_pb.ClusterContext, error) { return server.buildDefaultPluginClusterContext(), nil }, - LockManager: lockManager, - }) + LockManager: lockManager, + ConfigDefaultsProvider: server.enrichConfigDefaults, + } + plugin, err := adminplugin.New(pluginOpts) if err != nil && dataDir != "" { glog.Warningf("Failed to initialize plugin with dataDir=%q: %v. Falling back to in-memory plugin state.", dataDir, err) - plugin, err = adminplugin.New(adminplugin.Options{ - DataDir: "", - ClusterContextProvider: func(_ context.Context) (*plugin_pb.ClusterContext, error) { - return server.buildDefaultPluginClusterContext(), nil - }, - LockManager: lockManager, - }) + pluginOpts.DataDir = "" + plugin, err = adminplugin.New(pluginOpts) } if err != nil { glog.Errorf("Failed to initialize plugin: %v", err) @@ -273,6 +271,89 @@ func (s *AdminServer) loadTaskConfigurationsFromPersistence() { configUpdateRegistry.UpdateAllConfigs(s.configPersistence) } +// enrichConfigDefaults is called by the plugin when bootstrapping a job type's +// default config from its descriptor. 
For admin_script, it fetches maintenance +// scripts from the master and uses them as the script default. +// +// MIGRATION: This exists to help users migrate from master.toml [master.maintenance] +// to the admin script plugin worker. Remove after March 2027. +func (s *AdminServer) enrichConfigDefaults(cfg *plugin_pb.PersistedJobTypeConfig) *plugin_pb.PersistedJobTypeConfig { + if cfg.JobType != "admin_script" { + return cfg + } + + var maintenanceScripts string + var sleepMinutes uint32 + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + resp, err := client.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return err + } + maintenanceScripts = resp.MaintenanceScripts + sleepMinutes = resp.MaintenanceSleepMinutes + return nil + }) + if err != nil { + glog.V(1).Infof("Could not fetch master configuration for admin_script defaults: %v", err) + return cfg + } + + script := cleanMaintenanceScript(maintenanceScripts) + if script == "" { + return cfg + } + + interval := int64(sleepMinutes) + if interval <= 0 { + interval = clustermaintenance.DefaultMaintenanceSleepMinutes + } + + glog.V(0).Infof("Enriching admin_script defaults from master maintenance scripts (interval=%dm)", interval) + + if cfg.AdminConfigValues == nil { + cfg.AdminConfigValues = make(map[string]*plugin_pb.ConfigValue) + } + cfg.AdminConfigValues["script"] = &plugin_pb.ConfigValue{ + Kind: &plugin_pb.ConfigValue_StringValue{StringValue: script}, + } + cfg.AdminConfigValues["run_interval_minutes"] = &plugin_pb.ConfigValue{ + Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: interval}, + } + cfg.UpdatedBy = "master_migration" + + return cfg +} + +// cleanMaintenanceScript strips lock/unlock commands and normalizes a +// maintenance script string for use with the admin script plugin worker. +// +// MIGRATION: Used by enrichConfigDefaults. 
// cleanMaintenanceScript normalizes a legacy maintenance script for the admin
// script plugin worker. It drops blank lines, full-line comments, inline
// comments ("cmd # note" keeps only "cmd"), and the shell "lock"/"unlock"
// commands (matched case-insensitively on the first token), returning the
// surviving commands joined by "\n". Note the comment stripping is not
// quote-aware, so a literal '#' inside an argument would also be truncated.
//
// MIGRATION: Used by enrichConfigDefaults. Remove after March 2027.
func cleanMaintenanceScript(script string) string {
	var kept []string
	for _, raw := range strings.Split(strings.ReplaceAll(script, "\r\n", "\n"), "\n") {
		cmd := strings.TrimSpace(raw)
		// Drop everything from the first '#' on; this handles both full-line
		// and inline comments in one place.
		if hash := strings.IndexByte(cmd, '#'); hash >= 0 {
			cmd = strings.TrimSpace(cmd[:hash])
		}
		if cmd == "" {
			continue
		}
		switch strings.ToLower(strings.Fields(cmd)[0]) {
		case "lock", "unlock":
			continue
		}
		kept = append(kept, cmd)
	}
	return strings.Join(kept, "\n")
}
+package dash + +import "testing" + +func TestCleanMaintenanceScript(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "empty", + input: "", + expected: "", + }, + { + name: "only lock unlock", + input: " lock\n unlock\n", + expected: "", + }, + { + name: "strips lock and unlock", + input: " lock\n ec.balance -apply\n volume.fix.replication -apply\n unlock\n", + expected: "ec.balance -apply\nvolume.fix.replication -apply", + }, + { + name: "case insensitive lock", + input: "Lock\nec.balance -apply\nUNLOCK", + expected: "ec.balance -apply", + }, + { + name: "preserves comments removal", + input: "lock\n# a comment\nec.balance -apply\nunlock", + expected: "ec.balance -apply", + }, + { + name: "no lock unlock present", + input: "ec.balance -apply\nvolume.fix.replication -apply", + expected: "ec.balance -apply\nvolume.fix.replication -apply", + }, + { + name: "windows line endings", + input: "lock\r\nec.balance -apply\r\nunlock\r\n", + expected: "ec.balance -apply", + }, + { + name: "lock with inline comment", + input: "lock # migration\nec.balance -apply\nunlock # done", + expected: "ec.balance -apply", + }, + { + name: "command with inline comment preserved", + input: "lock\nec.balance -apply # rebalance shards\nunlock", + expected: "ec.balance -apply", + }, + { + name: "only inline comment after stripping", + input: "# full line comment\n # indented comment\n", + expected: "", + }, + { + name: "typical master default", + input: "\n lock\n ec.encode -fullPercent=95 -quietFor=1h\n ec.rebuild -apply\n ec.balance -apply\n fs.log.purge -daysAgo=7\n volume.deleteEmpty -quietFor=24h -apply\n volume.balance -apply\n volume.fix.replication -apply\n s3.clean.uploads -timeAgo=24h\n unlock\n", + expected: "ec.encode -fullPercent=95 -quietFor=1h\nec.rebuild -apply\nec.balance -apply\nfs.log.purge -daysAgo=7\nvolume.deleteEmpty -quietFor=24h -apply\nvolume.balance -apply\nvolume.fix.replication -apply\ns3.clean.uploads 
-timeAgo=24h", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := cleanMaintenanceScript(tt.input) + if got != tt.expected { + t.Errorf("cleanMaintenanceScript(%q) = %q, want %q", tt.input, got, tt.expected) + } + }) + } +} diff --git a/weed/admin/plugin/config_store.go b/weed/admin/plugin/config_store.go index 9a2b8484e..c36f7bd67 100644 --- a/weed/admin/plugin/config_store.go +++ b/weed/admin/plugin/config_store.go @@ -256,6 +256,53 @@ func (s *ConfigStore) SaveJobTypeConfig(config *plugin_pb.PersistedJobTypeConfig return nil } +// SaveJobTypeConfigIfNotExists atomically checks whether a config for the +// given job type already exists and only persists config when none is found. +// Returns true if the config was saved, false if a config already existed. +func (s *ConfigStore) SaveJobTypeConfigIfNotExists(config *plugin_pb.PersistedJobTypeConfig) (bool, error) { + if config == nil { + return false, fmt.Errorf("job type config is nil") + } + if config.JobType == "" { + return false, fmt.Errorf("job type config has empty job_type") + } + sanitizedJobType, err := sanitizeJobType(config.JobType) + if err != nil { + return false, err + } + config.JobType = sanitizedJobType + + clone := proto.Clone(config).(*plugin_pb.PersistedJobTypeConfig) + + s.mu.Lock() + defer s.mu.Unlock() + + if !s.configured { + if _, exists := s.memConfigs[config.JobType]; exists { + return false, nil + } + s.memConfigs[config.JobType] = clone + return true, nil + } + + pbPath := filepath.Join(s.baseDir, jobTypesDirName, config.JobType, configPBFileName) + if _, statErr := os.Stat(pbPath); statErr == nil { + return false, nil + } + + jobTypeDir, err := s.ensureJobTypeDir(config.JobType) + if err != nil { + return false, err + } + + jsonPath := filepath.Join(jobTypeDir, configJSONFileName) + if err := writeProtoFiles(clone, filepath.Join(jobTypeDir, configPBFileName), jsonPath); err != nil { + return false, fmt.Errorf("save job type config for %s: %w", 
config.JobType, err) + } + + return true, nil +} + func (s *ConfigStore) LoadJobTypeConfig(jobType string) (*plugin_pb.PersistedJobTypeConfig, error) { if _, err := sanitizeJobType(jobType); err != nil { return nil, err diff --git a/weed/admin/plugin/config_store_test.go b/weed/admin/plugin/config_store_test.go index 689ec4e0a..f53a2699d 100644 --- a/weed/admin/plugin/config_store_test.go +++ b/weed/admin/plugin/config_store_test.go @@ -208,6 +208,81 @@ func TestConfigStoreMonitorStateRoundTrip(t *testing.T) { } } +func TestConfigStoreSaveJobTypeConfigIfNotExists(t *testing.T) { + t.Parallel() + + t.Run("in-memory", func(t *testing.T) { + t.Parallel() + store, err := NewConfigStore("") + if err != nil { + t.Fatalf("NewConfigStore: %v", err) + } + testSaveJobTypeConfigIfNotExists(t, store) + }) + + t.Run("on-disk", func(t *testing.T) { + t.Parallel() + store, err := NewConfigStore(t.TempDir()) + if err != nil { + t.Fatalf("NewConfigStore: %v", err) + } + testSaveJobTypeConfigIfNotExists(t, store) + }) +} + +func testSaveJobTypeConfigIfNotExists(t *testing.T, store *ConfigStore) { + t.Helper() + + cfg := &plugin_pb.PersistedJobTypeConfig{ + JobType: "admin_script", + AdminRuntime: &plugin_pb.AdminRuntimeConfig{Enabled: true}, + } + + // First call should save. + saved, err := store.SaveJobTypeConfigIfNotExists(cfg) + if err != nil { + t.Fatalf("first SaveJobTypeConfigIfNotExists: %v", err) + } + if !saved { + t.Fatal("expected first call to save the config") + } + + // Second call with same job type should not save. + saved, err = store.SaveJobTypeConfigIfNotExists(&plugin_pb.PersistedJobTypeConfig{ + JobType: "admin_script", + AdminRuntime: &plugin_pb.AdminRuntimeConfig{Enabled: false}, + }) + if err != nil { + t.Fatalf("second SaveJobTypeConfigIfNotExists: %v", err) + } + if saved { + t.Fatal("expected second call to be a no-op") + } + + // Verify the original config was preserved. 
+ loaded, err := store.LoadJobTypeConfig("admin_script") + if err != nil { + t.Fatalf("LoadJobTypeConfig: %v", err) + } + if loaded == nil { + t.Fatal("expected config to exist") + } + if !loaded.AdminRuntime.Enabled { + t.Fatal("expected original config (Enabled=true) to be preserved") + } + + // Different job type should still save. + saved, err = store.SaveJobTypeConfigIfNotExists(&plugin_pb.PersistedJobTypeConfig{ + JobType: "vacuum", + }) + if err != nil { + t.Fatalf("SaveJobTypeConfigIfNotExists for different type: %v", err) + } + if !saved { + t.Fatal("expected save for a different job type") + } +} + func TestConfigStoreJobDetailRoundTrip(t *testing.T) { t.Parallel() diff --git a/weed/admin/plugin/plugin.go b/weed/admin/plugin/plugin.go index aecf44757..18ebcb891 100644 --- a/weed/admin/plugin/plugin.go +++ b/weed/admin/plugin/plugin.go @@ -34,6 +34,11 @@ type Options struct { SchedulerTick time.Duration ClusterContextProvider func(context.Context) (*plugin_pb.ClusterContext, error) LockManager LockManager + // ConfigDefaultsProvider is an optional callback invoked when a job type's + // config is being bootstrapped from its descriptor defaults. It can enrich + // or replace the default config before it is persisted. If nil, descriptor + // defaults are used as-is. + ConfigDefaultsProvider func(config *plugin_pb.PersistedJobTypeConfig) *plugin_pb.PersistedJobTypeConfig } // JobTypeInfo contains metadata about a plugin job type. 
@@ -54,6 +59,7 @@ type Plugin struct { schedulerTick time.Duration clusterContextProvider func(context.Context) (*plugin_pb.ClusterContext, error) + configDefaultsProvider func(config *plugin_pb.PersistedJobTypeConfig) *plugin_pb.PersistedJobTypeConfig lockManager LockManager schedulerMu sync.Mutex @@ -161,6 +167,7 @@ func New(options Options) (*Plugin, error) { sendTimeout: sendTimeout, schedulerTick: schedulerTick, clusterContextProvider: options.ClusterContextProvider, + configDefaultsProvider: options.ConfigDefaultsProvider, lockManager: options.LockManager, sessions: make(map[string]*streamSession), pendingSchema: make(map[string]chan *plugin_pb.ConfigSchemaResponse), @@ -402,6 +409,7 @@ func (r *Plugin) SaveJobTypeConfig(config *plugin_pb.PersistedJobTypeConfig) err return nil } + func (r *Plugin) LoadDescriptor(jobType string) (*plugin_pb.JobTypeDescriptor, error) { return r.store.LoadDescriptor(jobType) } @@ -1035,14 +1043,6 @@ func (r *Plugin) ensureJobTypeConfigFromDescriptor(jobType string, descriptor *p return nil } - existing, err := r.store.LoadJobTypeConfig(jobType) - if err != nil { - return err - } - if existing != nil { - return nil - } - workerDefaults := CloneConfigValueMap(descriptor.WorkerDefaultValues) if len(workerDefaults) == 0 && descriptor.WorkerConfigForm != nil { workerDefaults = CloneConfigValueMap(descriptor.WorkerConfigForm.DefaultValues) @@ -1079,7 +1079,22 @@ func (r *Plugin) ensureJobTypeConfigFromDescriptor(jobType string, descriptor *p UpdatedBy: "plugin", } - return r.store.SaveJobTypeConfig(cfg) + // Check existence first to avoid calling configDefaultsProvider unnecessarily + // (e.g., it may make a blocking gRPC call to fetch master config). 
+ existing, err := r.store.LoadJobTypeConfig(jobType) + if err != nil { + return err + } + if existing != nil { + return nil + } + + if r.configDefaultsProvider != nil { + cfg = r.configDefaultsProvider(cfg) + } + + _, err = r.store.SaveJobTypeConfigIfNotExists(cfg) + return err } func (r *Plugin) handleDetectionProposals(workerID string, message *plugin_pb.DetectionProposals) { diff --git a/weed/cluster/maintenance/maintenance_config.go b/weed/cluster/maintenance/maintenance_config.go index 1fc725b5b..2fab60f71 100644 --- a/weed/cluster/maintenance/maintenance_config.go +++ b/weed/cluster/maintenance/maintenance_config.go @@ -1,5 +1,7 @@ package maintenance +const DefaultMaintenanceSleepMinutes = 17 + const DefaultMasterMaintenanceScripts = ` lock ec.encode -fullPercent=95 -quietFor=1h diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 337ed88b9..8289cd233 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -383,6 +383,9 @@ message GetMasterConfigurationResponse { string leader = 5; uint32 volume_size_limit_m_b = 6; bool volume_preallocate = 7; + // MIGRATION: fields 8-9 help migrate master.toml [master.maintenance] to admin script plugin. Remove after March 2027. 
+ string maintenance_scripts = 8; + uint32 maintenance_sleep_minutes = 9; } message ListClusterNodesRequest { diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index faa03df2d..20a74f633 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -2973,16 +2973,18 @@ func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { } type GetMasterConfigurationResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` - MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` - StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` - DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"` - Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` - VolumeSizeLimitMB uint32 `protobuf:"varint,6,opt,name=volume_size_limit_m_b,json=volumeSizeLimitMB,proto3" json:"volume_size_limit_m_b,omitempty"` - VolumePreallocate bool `protobuf:"varint,7,opt,name=volume_preallocate,json=volumePreallocate,proto3" json:"volume_preallocate,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` + StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" 
json:"storage_backends,omitempty"` + DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"` + Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` + VolumeSizeLimitMB uint32 `protobuf:"varint,6,opt,name=volume_size_limit_m_b,json=volumeSizeLimitMB,proto3" json:"volume_size_limit_m_b,omitempty"` + VolumePreallocate bool `protobuf:"varint,7,opt,name=volume_preallocate,json=volumePreallocate,proto3" json:"volume_preallocate,omitempty"` + MaintenanceScripts string `protobuf:"bytes,8,opt,name=maintenance_scripts,json=maintenanceScripts,proto3" json:"maintenance_scripts,omitempty"` + MaintenanceSleepMinutes uint32 `protobuf:"varint,9,opt,name=maintenance_sleep_minutes,json=maintenanceSleepMinutes,proto3" json:"maintenance_sleep_minutes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetMasterConfigurationResponse) Reset() { @@ -3064,6 +3066,20 @@ func (x *GetMasterConfigurationResponse) GetVolumePreallocate() bool { return false } +func (x *GetMasterConfigurationResponse) GetMaintenanceScripts() string { + if x != nil { + return x.MaintenanceScripts + } + return "" +} + +func (x *GetMasterConfigurationResponse) GetMaintenanceSleepMinutes() uint32 { + if x != nil { + return x.MaintenanceSleepMinutes + } + return 0 +} + type ListClusterNodesRequest struct { state protoimpl.MessageState `protogen:"open.v1"` ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` @@ -4529,7 +4545,7 @@ const file_master_proto_rawDesc = "" + " \x01(\bR\n" + "isReadonly\"\x1c\n" + "\x1aVolumeMarkReadonlyResponse\"\x1f\n" + - "\x1dGetMasterConfigurationRequest\"\xf3\x02\n" + + "\x1dGetMasterConfigurationRequest\"\xe0\x03\n" + "\x1eGetMasterConfigurationResponse\x12'\n" + "\x0fmetrics_address\x18\x01 \x01(\tR\x0emetricsAddress\x128\n" + "\x18metrics_interval_seconds\x18\x02 
\x01(\rR\x16metricsIntervalSeconds\x12D\n" + @@ -4537,7 +4553,9 @@ const file_master_proto_rawDesc = "" + "\x13default_replication\x18\x04 \x01(\tR\x12defaultReplication\x12\x16\n" + "\x06leader\x18\x05 \x01(\tR\x06leader\x120\n" + "\x15volume_size_limit_m_b\x18\x06 \x01(\rR\x11volumeSizeLimitMB\x12-\n" + - "\x12volume_preallocate\x18\a \x01(\bR\x11volumePreallocate\"q\n" + + "\x12volume_preallocate\x18\a \x01(\bR\x11volumePreallocate\x12/\n" + + "\x13maintenance_scripts\x18\b \x01(\tR\x12maintenanceScripts\x12:\n" + + "\x19maintenance_sleep_minutes\x18\t \x01(\rR\x17maintenanceSleepMinutes\"q\n" + "\x17ListClusterNodesRequest\x12\x1f\n" + "\vclient_type\x18\x01 \x01(\tR\n" + "clientType\x12\x1f\n" + diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 848dc7905..60fdbd848 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -11,6 +11,7 @@ import ( "github.com/google/uuid" "github.com/seaweedfs/seaweedfs/weed/cluster" + "github.com/seaweedfs/seaweedfs/weed/cluster/maintenance" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/backend" @@ -454,14 +455,24 @@ func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_ // tell the volume servers about the leader leader, _ := ms.Topo.Leader() + // MIGRATION: expose maintenance scripts for admin server seeding. Remove after March 2027. 
+ v := util.GetViper() + maintenanceScripts := v.GetString("master.maintenance.scripts") + maintenanceSleepMinutes := v.GetInt("master.maintenance.sleep_minutes") + if maintenanceSleepMinutes <= 0 { + maintenanceSleepMinutes = maintenance.DefaultMaintenanceSleepMinutes + } + resp := &master_pb.GetMasterConfigurationResponse{ - MetricsAddress: ms.option.MetricsAddress, - MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), - StorageBackends: backend.ToPbStorageBackends(), - DefaultReplication: ms.option.DefaultReplicaPlacement, - VolumeSizeLimitMB: uint32(ms.option.VolumeSizeLimitMB), - VolumePreallocate: ms.option.VolumePreallocate, - Leader: string(leader), + MetricsAddress: ms.option.MetricsAddress, + MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + StorageBackends: backend.ToPbStorageBackends(), + DefaultReplication: ms.option.DefaultReplicaPlacement, + VolumeSizeLimitMB: uint32(ms.option.VolumeSizeLimitMB), + VolumePreallocate: ms.option.VolumePreallocate, + Leader: string(leader), + MaintenanceScripts: maintenanceScripts, + MaintenanceSleepMinutes: uint32(maintenanceSleepMinutes), } return resp, nil diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 429458c5b..09fc6bc74 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -350,7 +350,7 @@ func (ms *MasterServer) startAdminScripts() { } glog.V(0).Infof("adminScripts: %v", adminScripts) - v.SetDefault("master.maintenance.sleep_minutes", 17) + v.SetDefault("master.maintenance.sleep_minutes", maintenance.DefaultMaintenanceSleepMinutes) sleepMinutes := v.GetFloat64("master.maintenance.sleep_minutes") scriptLines := strings.Split(adminScripts, "\n")