
Unify the parameter to disable dry-run on weed shell commands to `-apply` (instead of `-force`). (#7450)

* Unify the parameter to disable dry-run on weed shell commands to --apply (instead of --force).

* lint

* refactor

* Execution Order Corrected

* handle deprecated force flag

* fix help messages

* Refactoring: using flag.FlagSet.Visit()

* consistent with other commands

* Checks for both flags

* fix toml files

---------

Co-authored-by: chrislu <chris.lu@gmail.com>
Lisandro Pin
committed by GitHub
commit 76e4a51964
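
For illustration, a hypothetical weed shell session after this change. The session itself is only a sketch (prompt, lock usage, and the chosen command are assumptions); the "simulation mode" and deprecation messages are the ones printed by infoAboutSimulationMode and the new handleDeprecatedForceFlag helper in the diffs below, and other command output is omitted:

    > lock
    > volume.balance
    Running in simulation mode. Use "-apply" option to apply the changes.
    > volume.balance -apply
    > volume.balance -force
    WARNING: -force is deprecated, please use -apply instead.
    > unlock

The deprecated -force spelling keeps working for now; if both flags are given, -force takes precedence and the command warns the user to switch to -apply.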
  1. docker/compose/master-cloud.toml (8 changed lines)
  2. weed/command/scaffold/master.toml (10 changed lines)
  3. weed/shell/command_collection_delete.go (13 changed lines)
  4. weed/shell/command_ec_balance.go (11 changed lines)
  5. weed/shell/command_ec_rebuild.go (10 changed lines)
  6. weed/shell/command_fs_configure.go (25 changed lines)
  7. weed/shell/command_fs_meta_change_volume_id.go (20 changed lines)
  8. weed/shell/command_volume_balance.go (9 changed lines)
  9. weed/shell/command_volume_check_disk.go (9 changed lines)
  10. weed/shell/command_volume_delete_empty.go (10 changed lines)
  11. weed/shell/command_volume_fix_replication.go (9 changed lines)
  12. weed/shell/command_volume_server_evacuate.go (12 changed lines)
  13. weed/shell/command_volume_server_leave.go (21 changed lines)
  14. weed/shell/command_volume_tier_move.go (9 changed lines)

docker/compose/master-cloud.toml (8 changed lines)

@@ -10,10 +10,10 @@
 scripts = """
 lock
 ec.encode -fullPercent=95 -quietFor=1h
-ec.rebuild -force
-ec.balance -force
-volume.balance -force
-volume.fix.replication -force
+ec.rebuild -apply
+ec.balance -apply
+volume.balance -apply
+volume.fix.replication -apply
 unlock
 """
 sleep_minutes = 17 # sleep minutes between each script execution

weed/command/scaffold/master.toml (10 changed lines)

@@ -9,11 +9,11 @@
 scripts = """
 lock
 ec.encode -fullPercent=95 -quietFor=1h
-ec.rebuild -force
-ec.balance -force
-volume.deleteEmpty -quietFor=24h -force
-volume.balance -force
-volume.fix.replication -force
+ec.rebuild -apply
+ec.balance -apply
+volume.deleteEmpty -quietFor=24h -apply
+volume.balance -apply
+volume.fix.replication -apply
 s3.clean.uploads -timeAgo=24h
 unlock
 """

weed/shell/command_collection_delete.go (13 changed lines)

@@ -23,7 +23,7 @@ func (c *commandCollectionDelete) Name() string {
 func (c *commandCollectionDelete) Help() string {
 return `delete specified collection
-collection.delete -collection <collection_name> -force
+collection.delete -collection <collection_name> -apply
 `
 }
@@ -36,11 +36,16 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ
 colDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 collectionName := colDeleteCommand.String("collection", "", "collection to delete. Use '_default_' for the empty-named collection.")
-applyBalancing := colDeleteCommand.Bool("force", false, "apply the collection")
+applyBalancing := colDeleteCommand.Bool("apply", false, "apply the collection")
+// TODO: remove this alias
+applyBalancingAlias := colDeleteCommand.Bool("force", false, "apply the collection (alias for -apply)")
 if err = colDeleteCommand.Parse(args); err != nil {
 return nil
 }
-infoAboutSimulationMode(writer, *applyBalancing, "-force")
+handleDeprecatedForceFlag(writer, colDeleteCommand, applyBalancingAlias, applyBalancing)
+infoAboutSimulationMode(writer, *applyBalancing, "-apply")
 if err = commandEnv.confirmIsLocked(args); err != nil {
 return
@@ -55,7 +60,7 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ
 }
 if !*applyBalancing {
-fmt.Fprintf(writer, "collection '%s' will be deleted. Use -force to apply the change.\n", *collectionName)
+fmt.Fprintf(writer, "collection '%s' will be deleted. Use -apply to apply the change.\n", *collectionName)
 return nil
 }

weed/shell/command_ec_balance.go (11 changed lines)

@@ -20,7 +20,7 @@ func (c *commandEcBalance) Name() string {
 func (c *commandEcBalance) Help() string {
 return `balance all ec shards among all racks and volume servers
-ec.balance [-c EACH_COLLECTION|<collection_name>] [-force] [-dataCenter <data_center>] [-shardReplicaPlacement <replica_placement>]
+ec.balance [-c EACH_COLLECTION|<collection_name>] [-apply] [-dataCenter <data_center>] [-shardReplicaPlacement <replica_placement>]
 Algorithm:
 ` + ecBalanceAlgorithmDescription
@@ -36,11 +36,16 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W
 dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
 shardReplicaPlacement := balanceCommand.String("shardReplicaPlacement", "", "replica placement for EC shards, or master default if empty")
 maxParallelization := balanceCommand.Int("maxParallelization", DefaultMaxParallelization, "run up to X tasks in parallel, whenever possible")
-applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan")
+applyBalancing := balanceCommand.Bool("apply", false, "apply the balancing plan")
+// TODO: remove this alias
+applyBalancingAlias := balanceCommand.Bool("force", false, "apply the balancing plan (alias for -apply)")
 if err = balanceCommand.Parse(args); err != nil {
 return nil
 }
-infoAboutSimulationMode(writer, *applyBalancing, "-force")
+handleDeprecatedForceFlag(writer, balanceCommand, applyBalancingAlias, applyBalancing)
+infoAboutSimulationMode(writer, *applyBalancing, "-apply")
 if err = commandEnv.confirmIsLocked(args); err != nil {
 return

weed/shell/command_ec_rebuild.go (10 changed lines)

@@ -28,7 +28,7 @@ func (c *commandEcRebuild) Name() string {
 func (c *commandEcRebuild) Help() string {
 return `find and rebuild missing ec shards among volume servers
-ec.rebuild [-c EACH_COLLECTION|<collection_name>] [-force]
+ec.rebuild [-c EACH_COLLECTION|<collection_name>] [-apply]
 Algorithm:
@@ -63,11 +63,15 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
 fixCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
-applyChanges := fixCommand.Bool("force", false, "apply the changes")
+applyChanges := fixCommand.Bool("apply", false, "apply the changes")
+// TODO: remove this alias
+applyChangesAlias := fixCommand.Bool("force", false, "apply the changes (alias for -apply)")
 if err = fixCommand.Parse(args); err != nil {
 return nil
 }
-infoAboutSimulationMode(writer, *applyChanges, "-force")
+handleDeprecatedForceFlag(writer, fixCommand, applyChangesAlias, applyChanges)
+infoAboutSimulationMode(writer, *applyChanges, "-apply")
 if err = commandEnv.confirmIsLocked(args); err != nil {
 return

weed/shell/command_fs_configure.go (25 changed lines)

@@ -159,3 +159,28 @@ func infoAboutSimulationMode(writer io.Writer, forceMode bool, forceModeOption s
 }
 fmt.Fprintf(writer, "Running in simulation mode. Use \"%s\" option to apply the changes.\n", forceModeOption)
 }
+
+// handleDeprecatedForceFlag handles the deprecated -force flag by checking if it was
+// explicitly provided, printing a deprecation warning, and copying its
+// value to the new flag. This ensures that explicit -force=false takes precedence.
+func handleDeprecatedForceFlag(writer io.Writer, fs *flag.FlagSet, forceAlias *bool, applyFlag *bool) {
+forceIsSet := false
+applyIsSet := false
+fs.Visit(func(f *flag.Flag) {
+switch f.Name {
+case "force":
+forceIsSet = true
+case "apply":
+applyIsSet = true
+}
+})
+
+if forceIsSet {
+if applyIsSet {
+fmt.Fprintf(writer, "WARNING: both -force and -apply are set. -force is deprecated and takes precedence. Please use only -apply.\n")
+} else {
+fmt.Fprintf(writer, "WARNING: -force is deprecated, please use -apply instead.\n")
+}
+*applyFlag = *forceAlias
+}
+}
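
The helper works because flag.FlagSet.Visit from Go's standard library invokes its callback only for flags that were actually set on the command line, so an explicit -force=false can be told apart from the flag simply being absent. A minimal, self-contained sketch of that behavior (the flag set and arguments below are hypothetical, not part of this change):

package main

import (
	"flag"
	"fmt"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	apply := fs.Bool("apply", false, "apply the changes")
	force := fs.Bool("force", false, "deprecated alias for -apply")

	// The user still passes the deprecated flag, explicitly set to false.
	if err := fs.Parse([]string{"-force=false"}); err != nil {
		return
	}

	// Visit iterates only over flags that were set on the command line,
	// so "-force=false" is visited while the untouched -apply is not.
	fs.Visit(func(f *flag.Flag) {
		fmt.Printf("flag %q was explicitly set to %s\n", f.Name, f.Value)
	})

	fmt.Println("apply:", *apply, "force:", *force) // apply: false force: false
}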

weed/shell/command_fs_meta_change_volume_id.go (20 changed lines)

@@ -4,13 +4,14 @@ import (
 "context"
 "flag"
 "fmt"
-"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
-"github.com/seaweedfs/seaweedfs/weed/storage/needle"
-"github.com/seaweedfs/seaweedfs/weed/util"
 "io"
 "os"
 "strconv"
 "strings"
+"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+"github.com/seaweedfs/seaweedfs/weed/storage/needle"
+"github.com/seaweedfs/seaweedfs/weed/util"
 )
 func init() {
@@ -27,8 +28,8 @@ func (c *commandFsMetaChangeVolumeId) Name() string {
 func (c *commandFsMetaChangeVolumeId) Help() string {
 return `change volume id in existing metadata.
-fs.meta.changeVolumeId -dir=/path/to/a/dir -fromVolumeId=x -toVolumeId=y -force
-fs.meta.changeVolumeId -dir=/path/to/a/dir -mapping=/path/to/mapping/file -force
+fs.meta.changeVolumeId -dir=/path/to/a/dir -fromVolumeId=x -toVolumeId=y -apply
+fs.meta.changeVolumeId -dir=/path/to/a/dir -mapping=/path/to/mapping/file -apply
 The mapping file should have these lines, each line is: [fromVolumeId]=>[toVolumeId]
 e.g.
@@ -49,11 +50,16 @@ func (c *commandFsMetaChangeVolumeId) Do(args []string, commandEnv *CommandEnv,
 mappingFileName := fsMetaChangeVolumeIdCommand.String("mapping", "", "a file with multiple volume id changes, with each line as x=>y")
 fromVolumeId := fsMetaChangeVolumeIdCommand.Uint("fromVolumeId", 0, "change metadata with this volume id")
 toVolumeId := fsMetaChangeVolumeIdCommand.Uint("toVolumeId", 0, "change metadata to this volume id")
-isForce := fsMetaChangeVolumeIdCommand.Bool("force", false, "applying the metadata changes")
+applyChanges := fsMetaChangeVolumeIdCommand.Bool("apply", false, "apply the metadata changes")
+// TODO: remove this alias
+applyChangesAlias := fsMetaChangeVolumeIdCommand.Bool("force", false, "apply the metadata changes (alias for -apply)")
 if err = fsMetaChangeVolumeIdCommand.Parse(args); err != nil {
 return err
 }
+handleDeprecatedForceFlag(writer, fsMetaChangeVolumeIdCommand, applyChangesAlias, applyChanges)
+infoAboutSimulationMode(writer, *applyChanges, "-apply")
 // load the mapping
 mapping := make(map[needle.VolumeId]needle.VolumeId)
 if *mappingFileName != "" {
@@ -86,7 +92,7 @@ func (c *commandFsMetaChangeVolumeId) Do(args []string, commandEnv *CommandEnv,
 }
 if hasChanges {
 println("Updating", parentPath, entry.Name)
-if *isForce {
+if *applyChanges {
 if updateErr := filer_pb.UpdateEntry(context.Background(), client, &filer_pb.UpdateEntryRequest{
 Directory: string(parentPath),
 Entry: entry,

weed/shell/command_volume_balance.go (9 changed lines)

@@ -39,7 +39,7 @@ func (c *commandVolumeBalance) Name() string {
 func (c *commandVolumeBalance) Help() string {
 return `balance all volumes among volume servers
-volume.balance [-collection ALL_COLLECTIONS|EACH_COLLECTION|<collection_name>] [-force] [-dataCenter=<data_center_name>] [-racks=rack_name_one,rack_name_two] [-nodes=192.168.0.1:8080,192.168.0.2:8080]
+volume.balance [-collection ALL_COLLECTIONS|EACH_COLLECTION|<collection_name>] [-apply] [-dataCenter=<data_center_name>] [-racks=rack_name_one,rack_name_two] [-nodes=192.168.0.1:8080,192.168.0.2:8080]
 The -collection parameter supports:
 - ALL_COLLECTIONS: balance across all collections
@@ -92,14 +92,17 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
 nodes := balanceCommand.String("nodes", "", "only apply the balancing for this nodes")
 writable := balanceCommand.Bool("writable", false, "only apply the balancing for writable volumes")
 noLock := balanceCommand.Bool("noLock", false, "do not lock the admin shell at one's own risk")
-applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan.")
+applyBalancing := balanceCommand.Bool("apply", false, "apply the balancing plan.")
+// TODO: remove this alias
+applyBalancingAlias := balanceCommand.Bool("force", false, "apply the balancing plan (alias for -apply)")
 if err = balanceCommand.Parse(args); err != nil {
 return nil
 }
+handleDeprecatedForceFlag(writer, balanceCommand, applyBalancingAlias, applyBalancing)
 c.writable = *writable
 c.applyBalancing = *applyBalancing
-infoAboutSimulationMode(writer, c.applyBalancing, "-force")
+infoAboutSimulationMode(writer, c.applyBalancing, "-apply")
 if *noLock {
 commandEnv.noLock = true

weed/shell/command_volume_check_disk.go (9 changed lines)

@@ -120,18 +120,21 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
 slowMode := fsckCommand.Bool("slow", false, "slow mode checks all replicas even file counts are the same")
 verbose := fsckCommand.Bool("v", false, "verbose mode")
 volumeId := fsckCommand.Uint("volumeId", 0, "the volume id")
-applyChanges := fsckCommand.Bool("force", false, "apply the fix")
+applyChanges := fsckCommand.Bool("apply", false, "apply the fix")
+// TODO: remove this alias
+applyChangesAlias := fsckCommand.Bool("force", false, "apply the fix (alias for -apply)")
 syncDeletions := fsckCommand.Bool("syncDeleted", false, "sync of deletions the fix")
 nonRepairThreshold := fsckCommand.Float64("nonRepairThreshold", 0.3, "repair when missing keys is not more than this limit")
 if err = fsckCommand.Parse(args); err != nil {
 return nil
 }
-infoAboutSimulationMode(writer, *applyChanges, "-force")
+handleDeprecatedForceFlag(writer, fsckCommand, applyChangesAlias, applyChanges)
+infoAboutSimulationMode(writer, *applyChanges, "-apply")
 if err = commandEnv.confirmIsLocked(args); err != nil {
 return
 }
 c.env = commandEnv
 c.writer = writer

weed/shell/command_volume_delete_empty.go (10 changed lines)

@@ -26,7 +26,7 @@ func (c *commandVolumeDeleteEmpty) Name() string {
 func (c *commandVolumeDeleteEmpty) Help() string {
 return `delete empty volumes from all volume servers
-volume.deleteEmpty -quietFor=24h -force
+volume.deleteEmpty -quietFor=24h -apply
 This command deletes all empty volumes from one volume server.
@@ -41,11 +41,15 @@ func (c *commandVolumeDeleteEmpty) Do(args []string, commandEnv *CommandEnv, wri
 volDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 quietPeriod := volDeleteCommand.Duration("quietFor", 24*time.Hour, "select empty volumes with no recent writes, avoid newly created ones")
-applyBalancing := volDeleteCommand.Bool("force", false, "apply to delete empty volumes")
+applyBalancing := volDeleteCommand.Bool("apply", false, "apply to delete empty volumes")
+// TODO: remove this alias
+applyBalancingAlias := volDeleteCommand.Bool("force", false, "apply to delete empty volumes (alias for -apply)")
 if err = volDeleteCommand.Parse(args); err != nil {
 return nil
 }
-infoAboutSimulationMode(writer, *applyBalancing, "-force")
+handleDeprecatedForceFlag(writer, volDeleteCommand, applyBalancingAlias, applyBalancing)
+infoAboutSimulationMode(writer, *applyBalancing, "-apply")
 if err = commandEnv.confirmIsLocked(args); err != nil {
 return

weed/shell/command_volume_fix_replication.go (9 changed lines)

@@ -46,7 +46,7 @@ func (c *commandVolumeFixReplication) Help() string {
 If the free slots satisfy the replication requirement, the volume content is copied over and mounted.
 volume.fix.replication # do not take action
-volume.fix.replication -force # actually deleting or copying the volume files and mount the volume
+volume.fix.replication -apply # actually deleting or copying the volume files and mount the volume
 volume.fix.replication -collectionPattern=important* # fix any collections with prefix "important"
 Note:
@@ -66,7 +66,9 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
 volFixReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 c.collectionPattern = volFixReplicationCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
-applyChanges := volFixReplicationCommand.Bool("force", false, "apply the fix")
+applyChanges := volFixReplicationCommand.Bool("apply", false, "apply the fix")
+// TODO: remove this alias
+applyChangesAlias := volFixReplicationCommand.Bool("force", false, "apply the fix (alias for -apply)")
 doDelete := volFixReplicationCommand.Bool("doDelete", true, "Also delete over-replicated volumes besides fixing under-replication")
 doCheck := volFixReplicationCommand.Bool("doCheck", true, "Also check synchronization before deleting")
 maxParallelization := volFixReplicationCommand.Int("maxParallelization", DefaultMaxParallelization, "run up to X tasks in parallel, whenever possible")
@@ -76,8 +78,9 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
 if err = volFixReplicationCommand.Parse(args); err != nil {
 return nil
 }
-infoAboutSimulationMode(writer, *applyChanges, "-force")
+handleDeprecatedForceFlag(writer, volFixReplicationCommand, applyChangesAlias, applyChanges)
+infoAboutSimulationMode(writer, *applyChanges, "-apply")
 commandEnv.noLock = !*applyChanges
 if err = commandEnv.confirmIsLocked(args); *applyChanges && err != nil {

weed/shell/command_volume_server_evacuate.go (12 changed lines)

@@ -6,12 +6,13 @@ import (
 "io"
 "os"
+"slices"
 "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 "github.com/seaweedfs/seaweedfs/weed/storage/needle"
 "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
 "github.com/seaweedfs/seaweedfs/weed/storage/types"
-"slices"
 )
 func init() {
@@ -57,17 +58,20 @@ func (c *commandVolumeServerEvacuate) Do(args []string, commandEnv *CommandEnv,
 c.volumeRack = vsEvacuateCommand.String("rack", "", "source rack for the volume servers")
 c.targetServer = vsEvacuateCommand.String("target", "", "<host>:<port> of target volume")
 skipNonMoveable := vsEvacuateCommand.Bool("skipNonMoveable", false, "skip volumes that can not be moved")
-applyChange := vsEvacuateCommand.Bool("force", false, "actually apply the changes")
+applyChange := vsEvacuateCommand.Bool("apply", false, "actually apply the changes")
+// TODO: remove this alias
+applyChangeAlias := vsEvacuateCommand.Bool("force", false, "actually apply the changes (alias for -apply)")
 retryCount := vsEvacuateCommand.Int("retry", 0, "how many times to retry")
 if err = vsEvacuateCommand.Parse(args); err != nil {
 return nil
 }
-infoAboutSimulationMode(writer, *applyChange, "-force")
+handleDeprecatedForceFlag(writer, vsEvacuateCommand, applyChangeAlias, applyChange)
+infoAboutSimulationMode(writer, *applyChange, "-apply")
 if err = commandEnv.confirmIsLocked(args); err != nil && *applyChange {
 return
 }
 if *volumeServer == "" && *c.volumeRack == "" {
 return fmt.Errorf("need to specify volume server by -node=<host>:<port> or source rack")
 }

weed/shell/command_volume_server_leave.go (21 changed lines)

@@ -4,11 +4,12 @@ import (
 "context"
 "flag"
 "fmt"
+"io"
 "github.com/seaweedfs/seaweedfs/weed/operation"
 "github.com/seaweedfs/seaweedfs/weed/pb"
 "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
 "google.golang.org/grpc"
-"io"
 )
 func init() {
@@ -25,7 +26,7 @@ func (c *commandVolumeServerLeave) Name() string {
 func (c *commandVolumeServerLeave) Help() string {
 return `stop a volume server from sending heartbeats to the master
-volumeServer.leave -node <volume server host:port> -force
+volumeServer.leave -node <volume server host:port> [-apply]
 This command enables gracefully shutting down the volume server.
 The volume server will stop sending heartbeats to the master.
@@ -43,11 +44,17 @@ func (c *commandVolumeServerLeave) Do(args []string, commandEnv *CommandEnv, wri
 vsLeaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 volumeServer := vsLeaveCommand.String("node", "", "<host>:<port> of the volume server")
+applyChanges := vsLeaveCommand.Bool("apply", false, "apply the changes")
+// TODO: remove this alias
+applyChangesAlias := vsLeaveCommand.Bool("force", false, "apply the changes (alias for -apply)")
 if err = vsLeaveCommand.Parse(args); err != nil {
 return nil
 }
-if err = commandEnv.confirmIsLocked(args); err != nil {
+handleDeprecatedForceFlag(writer, vsLeaveCommand, applyChangesAlias, applyChanges)
+infoAboutSimulationMode(writer, *applyChanges, "-apply")
+if err = commandEnv.confirmIsLocked(args); err != nil && *applyChanges {
 return
 }
@@ -55,11 +62,15 @@ func (c *commandVolumeServerLeave) Do(args []string, commandEnv *CommandEnv, wri
 return fmt.Errorf("need to specify volume server by -node=<host>:<port>")
 }
-return volumeServerLeave(commandEnv.option.GrpcDialOption, pb.ServerAddress(*volumeServer), writer)
+return volumeServerLeave(commandEnv.option.GrpcDialOption, pb.ServerAddress(*volumeServer), writer, *applyChanges)
 }
-func volumeServerLeave(grpcDialOption grpc.DialOption, volumeServer pb.ServerAddress, writer io.Writer) (err error) {
+func volumeServerLeave(grpcDialOption grpc.DialOption, volumeServer pb.ServerAddress, writer io.Writer, applyChanges bool) (err error) {
+if !applyChanges {
+fmt.Fprintf(writer, "Would ask volume server %s to leave (dry-run)\n", volumeServer)
+return nil
+}
 return operation.WithVolumeServerClient(false, volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 _, leaveErr := volumeServerClient.VolumeServerLeave(context.Background(), &volume_server_pb.VolumeServerLeaveRequest{})
 if leaveErr != nil {

weed/shell/command_volume_tier_move.go (9 changed lines)

@@ -66,19 +66,22 @@ func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer
 source := tierCommand.String("fromDiskType", "", "the source disk type")
 target := tierCommand.String("toDiskType", "", "the target disk type")
 parallelLimit := tierCommand.Int("parallelLimit", 0, "limit the number of parallel copying jobs")
-applyChange := tierCommand.Bool("force", false, "actually apply the changes")
+applyChange := tierCommand.Bool("apply", false, "actually apply the changes")
+// TODO: remove this alias
+applyChangeAlias := tierCommand.Bool("force", false, "actually apply the changes (alias for -apply)")
 ioBytePerSecond := tierCommand.Int64("ioBytePerSecond", 0, "limit the speed of move")
 replicationString := tierCommand.String("toReplication", "", "the new target replication setting")
 if err = tierCommand.Parse(args); err != nil {
 return nil
 }
-infoAboutSimulationMode(writer, *applyChange, "-force")
+handleDeprecatedForceFlag(writer, tierCommand, applyChangeAlias, applyChange)
+infoAboutSimulationMode(writer, *applyChange, "-apply")
 if err = commandEnv.confirmIsLocked(args); err != nil {
 return
 }
 fromDiskType := types.ToDiskType(*source)
 toDiskType := types.ToDiskType(*target)
