Humanize output for `weed.server` by default (#7758)
* Implement a `weed shell` command to return a status overview of the cluster. Detailed file information will be implemented in a follow-up MR. Note also that masters are currently not reporting back EC shard sizes correctly, via `master_pb.VolumeEcShardInformationMessage.shard_sizes`. For example:

```
> status
cluster:
  id: topo
  status: LOCKED
  nodes: 10
  topology: 1 DC(s)s, 1 disk(s) on 1 rack(s)
volumes:
  total: 3 volumes on 1 collections
  max size: 31457280000 bytes
  regular: 2/80 volumes on 6 replicas, 6 writable (100.00%), 0 read-only (0.00%)
  EC: 1 EC volumes on 14 shards (14.00 shards/volume)
storage:
  total: 186024424 bytes
  regular volumes: 186024424 bytes
  EC volumes: 0 bytes
  raw: 558073152 bytes on volume replicas, 0 bytes on EC shard files
```

* Humanize output for `weed.server` by default. Makes things more readable :)

```
> cluster.status
cluster:
  id: topo
  status: LOCKED
  nodes: 10
  topology: 1 DC, 10 disks on 1 rack
volumes:
  total: 3 volumes, 1 collection
  max size: 32 GB
  regular: 2/80 volumes on 6 replicas, 6 writable (100%), 0 read-only (0%)
  EC: 1 EC volume on 14 shards (14 shards/volume)
storage:
  total: 172 MB
  regular volumes: 172 MB
  EC volumes: 0 B
  raw: 516 MB on volume replicas, 0 B on EC shards
```

```
> cluster.status --humanize=false
cluster:
  id: topo
  status: LOCKED
  nodes: 10
  topology: 1 DC(s), 10 disk(s) on 1 rack(s)
volumes:
  total: 3 volume(s), 1 collection(s)
  max size: 31457280000 byte(s)
  regular: 2/80 volume(s) on 6 replica(s), 5 writable (83.33%), 1 read-only (16.67%)
  EC: 1 EC volume(s) on 14 shard(s) (14.00 shards/volume)
storage:
  total: 172128072 byte(s)
  regular volumes: 172128072 byte(s)
  EC volumes: 0 byte(s)
  raw: 516384216 byte(s) on volume replicas, 0 byte(s) on EC shards
```

Also adds unit tests, and reshuffles test file handling for clarity.
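The humanized mode boils down to two changes per value: byte counts become SI sizes, and item counts get thousands separators plus real pluralization instead of the raw "(s)" suffixes. Below is a minimal sketch of that toggle, assuming the `github.com/dustin/go-humanize` library; the helper names are illustrative, not the ones used in the PR.

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

// formatBytes renders a byte count either as a human-readable SI size
// ("32 GB") or as a raw count with a pluralization hint ("31457280000 byte(s)").
func formatBytes(n uint64, humanized bool) string {
	if humanized {
		return humanize.Bytes(n)
	}
	return fmt.Sprintf("%d byte(s)", n)
}

// formatCount renders an item count either with thousands separators and a
// properly pluralized noun ("12,056 volumes"), or raw with a "(s)" suffix
// ("12056 volume(s)").
func formatCount(n int64, noun string, humanized bool) string {
	if !humanized {
		return fmt.Sprintf("%d %s(s)", n, noun)
	}
	if n == 1 {
		return fmt.Sprintf("%s %s", humanize.Comma(n), noun)
	}
	return fmt.Sprintf("%s %ss", humanize.Comma(n), noun)
}

func main() {
	fmt.Println(formatBytes(31457280000, true))      // e.g. "31 GB" (rounding may differ from the PR)
	fmt.Println(formatBytes(31457280000, false))     // "31457280000 byte(s)"
	fmt.Println(formatCount(12056, "volume", true))  // "12,056 volumes"
	fmt.Println(formatCount(1, "collection", false)) // "1 collection(s)"
}
```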
8 changed files with 306 additions and 95 deletions:

- weed/shell/command_cluster_status.go (135)
- weed/shell/command_cluster_status_test.go (140)
- weed/shell/command_ec_common_test.go (82)
- weed/shell/command_volume_list_test.go (16)
- weed/shell/common_test.go (20)
- weed/shell/volume.ecshards.txt (4)
- weed/shell/volume.list.txt (2)
- weed/shell/volume.list2.txt (2)

Of these, only the two new test files are expanded below.
weed/shell/command_cluster_status_test.go (new file, +140 lines):

```go
package shell

import (
	"bytes"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

func TestPrintClusterInfo(t *testing.T) {
	testCases := []struct {
		topology *master_pb.TopologyInfo
		humanize bool
		want     string
	}{
		{
			testTopology1, true,
			`cluster:
  id: test_topo_1
  status: unlocked
  nodes: 5
  topology: 5 DCs, 5 disks on 6 racks

`,
		},
		{
			testTopology1, false,
			`cluster:
  id: test_topo_1
  status: unlocked
  nodes: 5
  topology: 5 DC(s), 5 disk(s) on 6 rack(s)

`,
		},
	}

	for _, tc := range testCases {
		var buf bytes.Buffer
		sp := &ClusterStatusPrinter{
			writer:   &buf,
			humanize: tc.humanize,
			topology: tc.topology,
		}
		sp.printClusterInfo()
		got := buf.String()

		if got != tc.want {
			t.Errorf("for %v: got %v, want %v", tc.topology.Id, got, tc.want)
		}
	}
}

func TestPrintVolumeInfo(t *testing.T) {
	testCases := []struct {
		topology *master_pb.TopologyInfo
		humanize bool
		want     string
	}{
		{
			testTopology2, true,
			`volumes:
  total: 12,056 volumes, 0 collections
  max size: 0 B
  regular: 5,302/25,063 volumes on 15,900 replicas, 15,900 writable (100%), 0 read-only (0%)
  EC: 6,754 EC volumes on 91,662 shards (13.57 shards/volume)

`,
		},
		{
			testTopology2, false,
			`volumes:
  total: 12056 volume(s), 0 collection(s)
  max size: 0 byte(s)
  regular: 5302/25063 volume(s) on 15900 replica(s), 15900 writable (100.00%), 0 read-only (0.00%)
  EC: 6754 EC volume(s) on 91662 shard(s) (13.57 shards/volume)

`,
		},
	}

	for _, tc := range testCases {
		var buf bytes.Buffer
		sp := &ClusterStatusPrinter{
			writer:   &buf,
			humanize: tc.humanize,
			topology: tc.topology,
		}
		sp.printVolumeInfo()
		got := buf.String()

		if got != tc.want {
			t.Errorf("for %v: got %v, want %v", tc.topology.Id, got, tc.want)
		}
	}
}

func TestPrintStorageInfo(t *testing.T) {
	testCases := []struct {
		topology *master_pb.TopologyInfo
		humanize bool
		want     string
	}{
		{
			testTopology2, true,
			`storage:
  total: 5.9 TB
  regular volumes: 5.9 TB
  EC volumes: 0 B
  raw: 18 TB on volume replicas, 0 B on EC shards

`,
		},
		{
			testTopology2, false,
			`storage:
  total: 5892610895448 byte(s)
  regular volumes: 5892610895448 byte(s)
  EC volumes: 0 byte(s)
  raw: 17676186754616 byte(s) on volume replicas, 0 byte(s) on EC shards

`,
		},
	}

	for _, tc := range testCases {
		var buf bytes.Buffer
		sp := &ClusterStatusPrinter{
			writer:   &buf,
			humanize: tc.humanize,
			topology: tc.topology,
		}
		sp.printStorageInfo()
		got := buf.String()

		if got != tc.want {
			t.Errorf("for %v: got %v, want %v", tc.topology.Id, got, tc.want)
		}
	}
}
```
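These tests exercise `ClusterStatusPrinter`, which is defined in `command_cluster_status.go` (part of this diff but not expanded here). Going only by the fields and methods the tests touch, a minimal sketch of its shape (not the actual implementation) would be:

```go
package shell

// Sketch inferred from the tests above; the real struct in
// command_cluster_status.go is more complete.

import (
	"io"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

type ClusterStatusPrinter struct {
	writer   io.Writer               // destination for the formatted report
	humanize bool                    // human-readable vs raw numbers
	topology *master_pb.TopologyInfo // topology snapshot to summarize
}

// Each print* method writes one section of the report; each section ends with
// a blank line, matching the trailing newlines in the expected test strings.
func (sp *ClusterStatusPrinter) printClusterInfo() { /* id, status, node count, topology shape */ }
func (sp *ClusterStatusPrinter) printVolumeInfo()  { /* volume, replica, and EC shard counts */ }
func (sp *ClusterStatusPrinter) printStorageInfo() { /* logical and raw byte totals */ }
```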
weed/shell/common_test.go (new file, +20 lines):

```go
package shell

import (
	_ "embed"
)

//go:embed volume.list.txt
var topoData string

//go:embed volume.list2.txt
var topoData2 string

//go:embed volume.ecshards.txt
var topoDataEc string

var (
	testTopology1  = parseOutput(topoData)
	testTopology2  = parseOutput(topoData2)
	testTopologyEc = parseOutput(topoDataEc)
)
```
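`parseOutput` is the existing test helper that parses captured `volume.list`-style text back into a `*master_pb.TopologyInfo`; centralizing the embedded fixtures in `common_test.go` lets the cluster-status, EC, and volume-list tests share the same topologies. Adding a further fixture would presumably follow the same pattern (the file and variable names below are hypothetical):

```go
// Hypothetical example: volume.list3.txt and testTopology3 are not part of
// this PR; they only illustrate how another shared fixture would be wired up.

//go:embed volume.list3.txt
var topoData3 string

var testTopology3 = parseOutput(topoData3)
```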