Browse Source

admin: seed admin_script plugin config from master maintenance scripts

When the admin server starts, fetch the maintenance scripts configuration
from the master via GetMasterConfiguration. If the admin_script plugin
worker does not already have a saved config, use the master's scripts as
the default value. This enables seamless migration from master.toml
[master.maintenance] to the admin script plugin worker.

Changes:
- Add maintenance_scripts and maintenance_sleep_minutes fields to
  GetMasterConfigurationResponse in master.proto
- Populate the new fields from viper config in master_grpc_server.go
- On admin server startup, fetch the master config and seed the
  admin_script plugin config if no config exists yet
- Strip lock/unlock commands from the master scripts since the admin
  script worker handles locking automatically

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
pull/8509/head
Chris Lu 1 week ago
parent
commit
5432c03ba5
  1. 110
      weed/admin/dash/admin_server.go
  2. 62
      weed/admin/dash/admin_server_seed_test.go
  3. 2
      weed/cluster/maintenance/maintenance_config.go
  4. 3
      weed/pb/master.proto
  5. 42
      weed/pb/master_pb/master.pb.go
  6. 25
      weed/server/master_grpc_server.go
  7. 2
      weed/server/master_server.go

110
weed/admin/dash/admin_server.go

@ -11,6 +11,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
adminplugin "github.com/seaweedfs/seaweedfs/weed/admin/plugin"
"github.com/seaweedfs/seaweedfs/weed/cluster"
clustermaintenance "github.com/seaweedfs/seaweedfs/weed/cluster/maintenance"
"github.com/seaweedfs/seaweedfs/weed/credential"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
@ -256,6 +257,8 @@ func NewAdminServer(masters string, templateFS http.FileSystem, dataDir string,
} else {
server.plugin = plugin
glog.V(0).Infof("Plugin enabled")
go server.seedAdminScriptFromMaster()
}
return server
@ -273,6 +276,113 @@ func (s *AdminServer) loadTaskConfigurationsFromPersistence() {
configUpdateRegistry.UpdateAllConfigs(s.configPersistence)
}
// seedAdminScriptFromMaster fetches maintenance scripts from the master's
// configuration and uses them as the default admin_script plugin config,
// if the plugin does not already have a saved config for admin_script.
//
// MIGRATION: This exists to help users migrate from master.toml [master.maintenance]
// to the admin script plugin worker. Remove after March 2027.
// seedAdminScriptFromMaster fetches maintenance scripts from the master's
// configuration and uses them as the default admin_script plugin config,
// if the plugin does not already have a saved config for admin_script.
//
// MIGRATION: This exists to help users migrate from master.toml [master.maintenance]
// to the admin script plugin worker. Remove after March 2027.
func (s *AdminServer) seedAdminScriptFromMaster() {
	if s.plugin == nil {
		return
	}
	// Check for an existing saved config first: if one is already present
	// there is nothing to seed, so we can skip waiting for the master
	// connection and the GetMasterConfiguration RPC entirely.
	existing, err := s.plugin.LoadJobTypeConfig("admin_script")
	if err != nil {
		glog.Warningf("Failed to check admin_script plugin config: %v", err)
		return
	}
	if existing != nil {
		return
	}
	// Wait up to ~30 seconds for a master connection to become available.
	connected := false
	for i := 0; i < 30; i++ {
		if s.masterClient.GetMaster(context.Background()) != "" {
			connected = true
			break
		}
		time.Sleep(time.Second)
	}
	if !connected {
		// No master to seed from; give up rather than issuing an RPC that
		// is certain to fail.
		glog.V(1).Infof("No master connection available; skipping admin_script seeding")
		return
	}
	var maintenanceScripts string
	var sleepMinutes uint32
	err = s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		resp, err := client.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{})
		if err != nil {
			return err
		}
		maintenanceScripts = resp.MaintenanceScripts
		sleepMinutes = resp.MaintenanceSleepMinutes
		return nil
	})
	if err != nil {
		glog.V(1).Infof("Could not fetch master configuration for admin_script seeding: %v", err)
		return
	}
	// The admin script worker handles locking itself, so lock/unlock commands
	// (plus comments and blank lines) are stripped from the master's script.
	script := cleanMaintenanceScript(maintenanceScripts)
	if script == "" {
		return
	}
	interval := int64(sleepMinutes)
	if interval <= 0 {
		interval = clustermaintenance.DefaultMaintenanceSleepMinutes
	}
	cfg := &plugin_pb.PersistedJobTypeConfig{
		JobType: "admin_script",
		AdminConfigValues: map[string]*plugin_pb.ConfigValue{
			"script": {
				Kind: &plugin_pb.ConfigValue_StringValue{StringValue: script},
			},
			"run_interval_minutes": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: interval},
			},
		},
		AdminRuntime: &plugin_pb.AdminRuntimeConfig{
			Enabled:                       true,
			DetectionIntervalSeconds:      60,
			DetectionTimeoutSeconds:       300,
			MaxJobsPerDetection:           1,
			GlobalExecutionConcurrency:    1,
			PerWorkerExecutionConcurrency: 1,
			JobTypeMaxRuntimeSeconds:      1800,
		},
		UpdatedBy: "master_migration",
	}
	if err := s.plugin.SaveJobTypeConfig(cfg); err != nil {
		glog.Warningf("Failed to seed admin_script plugin config from master: %v", err)
		return
	}
	glog.V(0).Infof("Seeded admin_script plugin config from master maintenance scripts (interval=%dm)", interval)
}
// cleanMaintenanceScript strips lock/unlock commands and normalizes a
// maintenance script string for use with the admin script plugin worker.
//
// MIGRATION: Used by seedAdminScriptFromMaster. Remove after March 2027.
// cleanMaintenanceScript strips lock/unlock commands and normalizes a
// maintenance script string for use with the admin script plugin worker.
// Blank lines and '#' comments are dropped, each remaining command is
// whitespace-trimmed, and the result is re-joined with '\n'.
//
// MIGRATION: Used by seedAdminScriptFromMaster. Remove after March 2027.
func cleanMaintenanceScript(script string) string {
	normalized := strings.ReplaceAll(script, "\r\n", "\n")
	var kept []string
	for _, raw := range strings.Split(normalized, "\n") {
		cmd := strings.TrimSpace(raw)
		switch {
		case cmd == "":
			continue
		case strings.HasPrefix(cmd, "#"):
			continue
		case strings.EqualFold(cmd, "lock"), strings.EqualFold(cmd, "unlock"):
			// The admin script worker manages locking itself.
			continue
		}
		kept = append(kept, cmd)
	}
	return strings.Join(kept, "\n")
}
// GetCredentialManager returns the credential manager
func (s *AdminServer) GetCredentialManager() *credential.CredentialManager {
return s.credentialManager

62
weed/admin/dash/admin_server_seed_test.go

@ -0,0 +1,62 @@
// MIGRATION: Tests for seedAdminScriptFromMaster. Remove after March 2027.
package dash
import "testing"
// TestCleanMaintenanceScript verifies that lock/unlock commands, comments,
// blank lines, and Windows line endings are stripped while real commands
// are preserved in order.
func TestCleanMaintenanceScript(t *testing.T) {
	cases := []struct {
		name string
		in   string
		want string
	}{
		{"empty", "", ""},
		{"only lock unlock", " lock\n unlock\n", ""},
		{
			"strips lock and unlock",
			" lock\n ec.balance -apply\n volume.fix.replication -apply\n unlock\n",
			"ec.balance -apply\nvolume.fix.replication -apply",
		},
		{"case insensitive lock", "Lock\nec.balance -apply\nUNLOCK", "ec.balance -apply"},
		{"preserves comments removal", "lock\n# a comment\nec.balance -apply\nunlock", "ec.balance -apply"},
		{
			"no lock unlock present",
			"ec.balance -apply\nvolume.fix.replication -apply",
			"ec.balance -apply\nvolume.fix.replication -apply",
		},
		{"windows line endings", "lock\r\nec.balance -apply\r\nunlock\r\n", "ec.balance -apply"},
		{
			"typical master default",
			"\n lock\n ec.encode -fullPercent=95 -quietFor=1h\n ec.rebuild -apply\n ec.balance -apply\n fs.log.purge -daysAgo=7\n volume.deleteEmpty -quietFor=24h -apply\n volume.balance -apply\n volume.fix.replication -apply\n s3.clean.uploads -timeAgo=24h\n unlock\n",
			"ec.encode -fullPercent=95 -quietFor=1h\nec.rebuild -apply\nec.balance -apply\nfs.log.purge -daysAgo=7\nvolume.deleteEmpty -quietFor=24h -apply\nvolume.balance -apply\nvolume.fix.replication -apply\ns3.clean.uploads -timeAgo=24h",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := cleanMaintenanceScript(tc.in); got != tc.want {
				t.Errorf("cleanMaintenanceScript(%q) = %q, want %q", tc.in, got, tc.want)
			}
		})
	}
}

2
weed/cluster/maintenance/maintenance_config.go

@ -1,5 +1,7 @@
package maintenance
const DefaultMaintenanceSleepMinutes = 17
const DefaultMasterMaintenanceScripts = `
lock
ec.encode -fullPercent=95 -quietFor=1h

3
weed/pb/master.proto

@ -383,6 +383,9 @@ message GetMasterConfigurationResponse {
string leader = 5;
uint32 volume_size_limit_m_b = 6;
bool volume_preallocate = 7;
// MIGRATION: fields 8-9 help migrate master.toml [master.maintenance] to admin script plugin. Remove after March 2027.
string maintenance_scripts = 8;
uint32 maintenance_sleep_minutes = 9;
}
message ListClusterNodesRequest {

42
weed/pb/master_pb/master.pb.go

@ -2973,16 +2973,18 @@ func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) {
}
type GetMasterConfigurationResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"`
MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"`
StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"`
DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"`
Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"`
VolumeSizeLimitMB uint32 `protobuf:"varint,6,opt,name=volume_size_limit_m_b,json=volumeSizeLimitMB,proto3" json:"volume_size_limit_m_b,omitempty"`
VolumePreallocate bool `protobuf:"varint,7,opt,name=volume_preallocate,json=volumePreallocate,proto3" json:"volume_preallocate,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"`
MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"`
StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"`
DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"`
Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"`
VolumeSizeLimitMB uint32 `protobuf:"varint,6,opt,name=volume_size_limit_m_b,json=volumeSizeLimitMB,proto3" json:"volume_size_limit_m_b,omitempty"`
VolumePreallocate bool `protobuf:"varint,7,opt,name=volume_preallocate,json=volumePreallocate,proto3" json:"volume_preallocate,omitempty"`
MaintenanceScripts string `protobuf:"bytes,8,opt,name=maintenance_scripts,json=maintenanceScripts,proto3" json:"maintenance_scripts,omitempty"`
MaintenanceSleepMinutes uint32 `protobuf:"varint,9,opt,name=maintenance_sleep_minutes,json=maintenanceSleepMinutes,proto3" json:"maintenance_sleep_minutes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetMasterConfigurationResponse) Reset() {
@ -3064,6 +3066,20 @@ func (x *GetMasterConfigurationResponse) GetVolumePreallocate() bool {
return false
}
// GetMaintenanceScripts returns the maintenance_scripts field, or "" when the
// receiver is nil. Protoc-generated nil-safe accessor — do not edit by hand;
// regenerate from master.proto instead.
func (x *GetMasterConfigurationResponse) GetMaintenanceScripts() string {
	if x != nil {
		return x.MaintenanceScripts
	}
	return ""
}
// GetMaintenanceSleepMinutes returns the maintenance_sleep_minutes field, or 0
// when the receiver is nil. Protoc-generated nil-safe accessor — do not edit
// by hand; regenerate from master.proto instead.
func (x *GetMasterConfigurationResponse) GetMaintenanceSleepMinutes() uint32 {
	if x != nil {
		return x.MaintenanceSleepMinutes
	}
	return 0
}
type ListClusterNodesRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"`
@ -4529,7 +4545,7 @@ const file_master_proto_rawDesc = "" +
" \x01(\bR\n" +
"isReadonly\"\x1c\n" +
"\x1aVolumeMarkReadonlyResponse\"\x1f\n" +
"\x1dGetMasterConfigurationRequest\"\xf3\x02\n" +
"\x1dGetMasterConfigurationRequest\"\xe0\x03\n" +
"\x1eGetMasterConfigurationResponse\x12'\n" +
"\x0fmetrics_address\x18\x01 \x01(\tR\x0emetricsAddress\x128\n" +
"\x18metrics_interval_seconds\x18\x02 \x01(\rR\x16metricsIntervalSeconds\x12D\n" +
@ -4537,7 +4553,9 @@ const file_master_proto_rawDesc = "" +
"\x13default_replication\x18\x04 \x01(\tR\x12defaultReplication\x12\x16\n" +
"\x06leader\x18\x05 \x01(\tR\x06leader\x120\n" +
"\x15volume_size_limit_m_b\x18\x06 \x01(\rR\x11volumeSizeLimitMB\x12-\n" +
"\x12volume_preallocate\x18\a \x01(\bR\x11volumePreallocate\"q\n" +
"\x12volume_preallocate\x18\a \x01(\bR\x11volumePreallocate\x12/\n" +
"\x13maintenance_scripts\x18\b \x01(\tR\x12maintenanceScripts\x12:\n" +
"\x19maintenance_sleep_minutes\x18\t \x01(\rR\x17maintenanceSleepMinutes\"q\n" +
"\x17ListClusterNodesRequest\x12\x1f\n" +
"\vclient_type\x18\x01 \x01(\tR\n" +
"clientType\x12\x1f\n" +

25
weed/server/master_grpc_server.go

@ -11,6 +11,7 @@ import (
"github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/cluster"
"github.com/seaweedfs/seaweedfs/weed/cluster/maintenance"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/storage/backend"
@ -454,14 +455,24 @@ func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_
// tell the volume servers about the leader
leader, _ := ms.Topo.Leader()
// MIGRATION: expose maintenance scripts for admin server seeding. Remove after March 2027.
v := util.GetViper()
maintenanceScripts := v.GetString("master.maintenance.scripts")
maintenanceSleepMinutes := v.GetInt("master.maintenance.sleep_minutes")
if maintenanceSleepMinutes <= 0 {
maintenanceSleepMinutes = maintenance.DefaultMaintenanceSleepMinutes
}
resp := &master_pb.GetMasterConfigurationResponse{
MetricsAddress: ms.option.MetricsAddress,
MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),
StorageBackends: backend.ToPbStorageBackends(),
DefaultReplication: ms.option.DefaultReplicaPlacement,
VolumeSizeLimitMB: uint32(ms.option.VolumeSizeLimitMB),
VolumePreallocate: ms.option.VolumePreallocate,
Leader: string(leader),
MetricsAddress: ms.option.MetricsAddress,
MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),
StorageBackends: backend.ToPbStorageBackends(),
DefaultReplication: ms.option.DefaultReplicaPlacement,
VolumeSizeLimitMB: uint32(ms.option.VolumeSizeLimitMB),
VolumePreallocate: ms.option.VolumePreallocate,
Leader: string(leader),
MaintenanceScripts: maintenanceScripts,
MaintenanceSleepMinutes: uint32(maintenanceSleepMinutes),
}
return resp, nil

2
weed/server/master_server.go

@ -350,7 +350,7 @@ func (ms *MasterServer) startAdminScripts() {
}
glog.V(0).Infof("adminScripts: %v", adminScripts)
v.SetDefault("master.maintenance.sleep_minutes", 17)
v.SetDefault("master.maintenance.sleep_minutes", maintenance.DefaultMaintenanceSleepMinutes)
sleepMinutes := v.GetFloat64("master.maintenance.sleep_minutes")
scriptLines := strings.Split(adminScripts, "\n")

Loading…
Cancel
Save