
Merge branch 'master' into allow_delete_objects_by_TTL

pull/7426/head
Konstantin Lebedev authored 4 weeks ago; committed by GitHub
commit 7c41795078
11 changed files:
1. k8s/charts/seaweedfs/values.yaml (72 lines changed)
2. weed/admin/static/css/admin.css (120 lines changed)
3. weed/command/volume.go (2 lines changed)
4. weed/server/constants/volume.go (4 lines changed)
5. weed/server/volume_grpc_client_to_master.go (4 lines changed)
6. weed/server/volume_server.go (6 lines changed)
7. weed/shell/command_volume_check_disk.go (7 lines changed)
8. weed/shell/command_volume_check_disk_test.go (7 lines changed)
9. weed/topology/disk.go (13 lines changed)
10. weed/topology/topology_test.go (114 lines changed)
11. weed/topology/volume_growth.go (4 lines changed)

k8s/charts/seaweedfs/values.yaml (72 lines changed)

@@ -235,27 +235,27 @@ master:
ingress:
enabled: false
className: "nginx"
className: ""
# host: false for "*" hostname
host: "master.seaweedfs.local"
path: "/sw-master/?(.*)"
pathType: ImplementationSpecific
annotations:
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
annotations: {}
# nginx.ingress.kubernetes.io/auth-type: "basic"
# nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
# nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
# nginx.ingress.kubernetes.io/service-upstream: "true"
# nginx.ingress.kubernetes.io/rewrite-target: /$1
# nginx.ingress.kubernetes.io/use-regex: "true"
# nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
# nginx.ingress.kubernetes.io/ssl-redirect: "false"
# nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
# nginx.ingress.kubernetes.io/configuration-snippet: |
# sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
# sub_filter '="/' '="./'; #make absolute paths to relative
# sub_filter '=/' '=./';
# sub_filter '/seaweedfsstatic' './seaweedfsstatic';
# sub_filter_once off;
tls: []
extraEnvironmentVars:
@@ -769,28 +769,28 @@ filer:
ingress:
enabled: false
className: "nginx"
className: ""
# host: false for "*" hostname
host: "seaweedfs.cluster.local"
path: "/sw-filer/?(.*)"
pathType: ImplementationSpecific
annotations:
nginx.ingress.kubernetes.io/backend-protocol: GRPC
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
annotations: {}
# nginx.ingress.kubernetes.io/backend-protocol: GRPC
# nginx.ingress.kubernetes.io/auth-type: "basic"
# nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
# nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
# nginx.ingress.kubernetes.io/service-upstream: "true"
# nginx.ingress.kubernetes.io/rewrite-target: /$1
# nginx.ingress.kubernetes.io/use-regex: "true"
# nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
# nginx.ingress.kubernetes.io/ssl-redirect: "false"
# nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
# nginx.ingress.kubernetes.io/configuration-snippet: |
# sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
# sub_filter '="/' '="./'; #make absolute paths to relative
# sub_filter '=/' '=./';
# sub_filter '/seaweedfsstatic' './seaweedfsstatic';
# sub_filter_once off;
# extraEnvVars is a list of extra environment variables to set with the stateful set.
extraEnvironmentVars:
@@ -1009,7 +1009,7 @@ s3:
ingress:
enabled: false
className: "nginx"
className: ""
# host: false for "*" hostname
host: "seaweedfs.cluster.local"
path: "/"

weed/admin/static/css/admin.css (120 lines changed)

@@ -1,5 +1,14 @@
/* SeaweedFS Dashboard Custom Styles */
/* Link colors - muted */
a {
color: #5b7c99;
}
a:hover {
color: #4a6a88;
}
/* Sidebar Styles */
.sidebar {
position: fixed;
@@ -23,11 +32,11 @@
}
.sidebar .nav-link:hover {
color: #007bff;
color: #5b7c99;
}
.sidebar .nav-link.active {
color: #007bff;
color: #5b7c99;
}
.sidebar .nav-link:hover .feather,
@@ -51,23 +60,23 @@ main {
/* Custom card styles */
.border-left-primary {
border-left: 0.25rem solid #4e73df !important;
border-left: 0.25rem solid #6b8caf !important;
}
.border-left-success {
border-left: 0.25rem solid #1cc88a !important;
border-left: 0.25rem solid #5a8a72 !important;
}
.border-left-info {
border-left: 0.25rem solid #36b9cc !important;
border-left: 0.25rem solid #6a9aaa !important;
}
.border-left-warning {
border-left: 0.25rem solid #f6c23e !important;
border-left: 0.25rem solid #b8995e !important;
}
.border-left-danger {
border-left: 0.25rem solid #e74a3b !important;
border-left: 0.25rem solid #a5615c !important;
}
/* Status badges */
@@ -75,6 +84,89 @@ main {
font-size: 0.875em;
}
/* Muted badge colors - override Bootstrap defaults */
.badge.bg-primary,
.bg-primary {
background-color: #6b8caf !important;
}
.badge.bg-success,
.bg-success {
background-color: #5a8a72 !important;
}
.badge.bg-info,
.bg-info {
background-color: #6a9aaa !important;
}
.badge.bg-warning,
.bg-warning {
background-color: #b8995e !important;
}
.badge.bg-danger,
.bg-danger {
background-color: #a5615c !important;
}
.badge.bg-secondary,
.bg-secondary {
background-color: #7a7d85 !important;
}
/* Muted card background colors for text-bg-* utility classes */
.text-bg-primary,
.card.text-bg-primary {
background-color: #6b8caf !important;
color: #fff !important;
}
.text-bg-success,
.card.text-bg-success {
background-color: #5a8a72 !important;
color: #fff !important;
}
.text-bg-info,
.card.text-bg-info {
background-color: #6a9aaa !important;
color: #fff !important;
}
.text-bg-warning,
.card.text-bg-warning {
background-color: #b8995e !important;
color: #fff !important;
}
.text-bg-danger,
.card.text-bg-danger {
background-color: #a5615c !important;
color: #fff !important;
}
/* Muted text color utilities */
.text-primary {
color: #6b8caf !important;
}
.text-success {
color: #5a8a72 !important;
}
.text-info {
color: #6a9aaa !important;
}
.text-warning {
color: #b8995e !important;
}
.text-danger {
color: #a5615c !important;
}
/* Progress bars */
.progress {
background-color: #f8f9fc;
@@ -123,13 +215,13 @@ main {
/* Buttons */
.btn-primary {
background-color: #4e73df;
border-color: #4e73df;
background-color: #6b8caf;
border-color: #6b8caf;
}
.btn-primary:hover {
background-color: #2e59d9;
border-color: #2653d4;
background-color: #5b7c99;
border-color: #5b7c99;
}
/* Text utilities */
@@ -163,7 +255,7 @@ main {
/* Custom utilities */
.bg-gradient-primary {
background: linear-gradient(180deg, #4e73df 10%, #224abe 100%);
background: linear-gradient(180deg, #6b8caf 10%, #5b7c99 100%);
}
.shadow {
@@ -184,11 +276,11 @@ main {
}
.nav-link[data-bs-toggle="collapse"]:not(.collapsed) {
color: #007bff;
color: #5b7c99;
}
.nav-link[data-bs-toggle="collapse"]:not(.collapsed) .fa-chevron-down {
color: #007bff;
color: #5b7c99;
}
/* Submenu styles */

weed/command/volume.go (2 lines changed)

@@ -258,7 +258,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
v.folders, v.folderMaxLimits, minFreeSpaces, diskTypes,
*v.idxFolder,
volumeNeedleMapKind,
v.masters, constants.VolumePulseSeconds, *v.dataCenter, *v.rack,
v.masters, constants.VolumePulsePeriod, *v.dataCenter, *v.rack,
v.whiteList,
*v.fixJpgOrientation, *v.readMode,
*v.compactionMBPerSecond,

weed/server/constants/volume.go (4 lines changed)

@@ -1,5 +1,7 @@
package constants
import "time"
const (
VolumePulseSeconds = 5
VolumePulsePeriod = 5 * time.Second
)
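For orientation, a minimal sketch (not part of this commit) of what the type change buys at call sites; the import path is inferred from the weed/server/constants package shown above:

package main

import (
	"time"

	"github.com/seaweedfs/seaweedfs/weed/server/constants"
)

func main() {
	// Old pattern, with the int constant VolumePulseSeconds:
	//   time.Sleep(time.Duration(constants.VolumePulseSeconds) * time.Second)
	// New pattern: the constant already carries its unit, so the
	// conversion at every call site disappears.
	time.Sleep(constants.VolumePulsePeriod)
}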

weed/server/volume_grpc_client_to_master.go (4 lines changed)

@@ -68,7 +68,7 @@ func (vs *VolumeServer) heartbeat() {
master = newLeader
}
vs.store.MasterAddress = master
newLeader, err = vs.doHeartbeatWithRetry(master, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second, duplicateRetryCount)
newLeader, err = vs.doHeartbeatWithRetry(master, grpcDialOption, vs.pulsePeriod, duplicateRetryCount)
if err != nil {
glog.V(0).Infof("heartbeat to %s error: %v", master, err)
@@ -81,7 +81,7 @@ func (vs *VolumeServer) heartbeat() {
} else {
// Regular error, reset duplicate retry count
duplicateRetryCount = 0
time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
time.Sleep(vs.pulsePeriod)
}
newLeader = ""

weed/server/volume_server.go (6 lines changed)

@@ -35,7 +35,7 @@ type VolumeServer struct {
SeedMasterNodes []pb.ServerAddress
whiteList []string
currentMaster pb.ServerAddress
pulseSeconds int
pulsePeriod time.Duration
dataCenter string
rack string
store *storage.Store
@@ -59,7 +59,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
folders []string, maxCounts []int32, minFreeSpaces []util.MinFreeSpace, diskTypes []types.DiskType,
idxFolder string,
needleMapKind storage.NeedleMapKind,
masterNodes []pb.ServerAddress, pulseSeconds int,
masterNodes []pb.ServerAddress, pulsePeriod time.Duration,
dataCenter string, rack string,
whiteList []string,
fixJpgOrientation bool,
@@ -86,7 +86,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds")
vs := &VolumeServer{
pulseSeconds: pulseSeconds,
pulsePeriod: pulsePeriod,
dataCenter: dataCenter,
rack: rack,
needleMapKind: needleMapKind,

weed/shell/command_volume_check_disk.go (7 lines changed)

@@ -88,7 +88,7 @@ func (c *commandVolumeCheckDisk) eqVolumeFileCount(a, b *VolumeReplica) (bool, b
return fileCountA == fileCountB, fileDeletedCountA == fileDeletedCountB
}
func (c *commandVolumeCheckDisk) shouldSkipVolume(a, b *VolumeReplica, pulseTimeAtSecond int64, syncDeletions, verbose bool) bool {
func (c *commandVolumeCheckDisk) shouldSkipVolume(a, b *VolumeReplica, pulseTime time.Time, syncDeletions, verbose bool) bool {
pulseTimeAtSecond := pulseTime.Unix()
doSyncDeletedCount := false
if syncDeletions && a.info.DeleteCount != b.info.DeleteCount {
doSyncDeletedCount = true
@@ -135,7 +136,7 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
c.writer = writer
// collect topology information
pulseTimeAtSecond := time.Now().Unix() - constants.VolumePulseSeconds*2
pulseTime := time.Now().Add(-constants.VolumePulsePeriod * 2)
topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
if err != nil {
return err
@@ -162,7 +163,7 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
})
for len(writableReplicas) >= 2 {
a, b := writableReplicas[0], writableReplicas[1]
if !*slowMode && c.shouldSkipVolume(a, b, pulseTimeAtSecond, *syncDeletions, *verbose) {
if !*slowMode && c.shouldSkipVolume(a, b, pulseTime, *syncDeletions, *verbose) {
// always choose the larger volume to be the source
writableReplicas = append(replicas[:1], writableReplicas[2:]...)
continue

weed/shell/command_volume_check_disk_test.go (7 lines changed)

@@ -1,9 +1,11 @@
package shell
import (
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"os"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
type testCommandVolumeCheckDisk struct {
@@ -65,7 +67,8 @@ func TestShouldSkipVolume(t *testing.T) {
},
}
for num, tt := range tests {
if isShould := cmdVolumeCheckDisk.shouldSkipVolume(&tt.a, &tt.b, tt.pulseTimeAtSecond, true, true); isShould != tt.shouldSkipVolume {
pulseTime := time.Unix(tt.pulseTimeAtSecond, 0)
if isShould := cmdVolumeCheckDisk.shouldSkipVolume(&tt.a, &tt.b, pulseTime, true, true); isShould != tt.shouldSkipVolume {
t.Fatalf("result of should skip volume is unexpected for %d test", num)
}
}

weed/topology/disk.go (13 lines changed)

@@ -176,6 +176,19 @@ func (d *Disk) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChanged bool)
d.UpAdjustDiskUsageDelta(types.ToDiskType(v.DiskType), deltaDiskUsage)
}
isChanged = d.volumes[v.Id].ReadOnly != v.ReadOnly
if isChanged {
// Adjust active volume count when ReadOnly status changes
// Use a separate delta object to avoid affecting other metric adjustments
readOnlyDelta := &DiskUsageCounts{}
if v.ReadOnly {
// Changed from writable to read-only
readOnlyDelta.activeVolumeCount = -1
} else {
// Changed from read-only to writable
readOnlyDelta.activeVolumeCount = 1
}
d.UpAdjustDiskUsageDelta(types.ToDiskType(v.DiskType), readOnlyDelta)
}
d.volumes[v.Id] = v
}
return
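As a rough illustration only, with simplified, hypothetical stand-in types rather than the real DiskUsageCounts struct: applying the read-only flip through its own delta keeps it separate from the earlier deltaDiskUsage adjustment, and the +1/-1 stays symmetric when the flag flips back, which is what the tests below verify.

package main

import "fmt"

// usage and usageDelta are hypothetical stand-ins for the disk usage
// bookkeeping adjusted in disk.go above.
type usage struct{ activeVolumeCount int64 }

type usageDelta struct{ activeVolumeCount int64 }

func (u *usage) apply(d *usageDelta) { u.activeVolumeCount += d.activeVolumeCount }

func main() {
	u := &usage{activeVolumeCount: 1} // one writable volume

	// Writable to read-only: subtract one active volume via its own delta.
	u.apply(&usageDelta{activeVolumeCount: -1})
	fmt.Println(u.activeVolumeCount) // 0

	// Read-only back to writable: add it back, restoring the original count.
	u.apply(&usageDelta{activeVolumeCount: 1})
	fmt.Println(u.activeVolumeCount) // 1
}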

weed/topology/topology_test.go (114 lines changed)

@@ -211,6 +211,120 @@ func TestAddRemoveVolume(t *testing.T) {
}
}
func TestVolumeReadOnlyStatusChange(t *testing.T) {
topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
dc := topo.GetOrCreateDataCenter("dc1")
rack := dc.GetOrCreateRack("rack1")
maxVolumeCounts := make(map[string]uint32)
maxVolumeCounts[""] = 25
dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0, "127.0.0.1", maxVolumeCounts)
// Create a writable volume
v := storage.VolumeInfo{
Id: needle.VolumeId(1),
Size: 100,
Collection: "",
DiskType: "",
FileCount: 10,
DeleteCount: 0,
DeletedByteCount: 0,
ReadOnly: false, // Initially writable
Version: needle.GetCurrentVersion(),
ReplicaPlacement: &super_block.ReplicaPlacement{},
Ttl: needle.EMPTY_TTL,
}
dn.UpdateVolumes([]storage.VolumeInfo{v})
topo.RegisterVolumeLayout(v, dn)
// Check initial active count (should be 1 since volume is writable)
usageCounts := topo.diskUsages.usages[types.HardDriveType]
assert(t, "initial activeVolumeCount", int(usageCounts.activeVolumeCount), 1)
assert(t, "initial remoteVolumeCount", int(usageCounts.remoteVolumeCount), 0)
// Change volume to read-only
v.ReadOnly = true
dn.UpdateVolumes([]storage.VolumeInfo{v})
// Check active count after marking read-only (should be 0)
usageCounts = topo.diskUsages.usages[types.HardDriveType]
assert(t, "activeVolumeCount after read-only", int(usageCounts.activeVolumeCount), 0)
// Change volume back to writable
v.ReadOnly = false
dn.UpdateVolumes([]storage.VolumeInfo{v})
// Check active count after marking writable again (should be 1)
usageCounts = topo.diskUsages.usages[types.HardDriveType]
assert(t, "activeVolumeCount after writable again", int(usageCounts.activeVolumeCount), 1)
}
func TestVolumeReadOnlyAndRemoteStatusChange(t *testing.T) {
topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
dc := topo.GetOrCreateDataCenter("dc1")
rack := dc.GetOrCreateRack("rack1")
maxVolumeCounts := make(map[string]uint32)
maxVolumeCounts[""] = 25
dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0, "127.0.0.1", maxVolumeCounts)
// Create a writable, local volume
v := storage.VolumeInfo{
Id: needle.VolumeId(1),
Size: 100,
Collection: "",
DiskType: "",
FileCount: 10,
DeleteCount: 0,
DeletedByteCount: 0,
ReadOnly: false, // Initially writable
RemoteStorageName: "", // Initially local
Version: needle.GetCurrentVersion(),
ReplicaPlacement: &super_block.ReplicaPlacement{},
Ttl: needle.EMPTY_TTL,
}
dn.UpdateVolumes([]storage.VolumeInfo{v})
topo.RegisterVolumeLayout(v, dn)
// Check initial counts
usageCounts := topo.diskUsages.usages[types.HardDriveType]
assert(t, "initial activeVolumeCount", int(usageCounts.activeVolumeCount), 1)
assert(t, "initial remoteVolumeCount", int(usageCounts.remoteVolumeCount), 0)
// Simultaneously change to read-only AND remote
v.ReadOnly = true
v.RemoteStorageName = "s3"
v.RemoteStorageKey = "key1"
dn.UpdateVolumes([]storage.VolumeInfo{v})
// Check counts after both changes
usageCounts = topo.diskUsages.usages[types.HardDriveType]
assert(t, "activeVolumeCount after read-only+remote", int(usageCounts.activeVolumeCount), 0)
assert(t, "remoteVolumeCount after read-only+remote", int(usageCounts.remoteVolumeCount), 1)
// Change back to writable but keep remote
v.ReadOnly = false
dn.UpdateVolumes([]storage.VolumeInfo{v})
// Check counts - should be writable (active=1) and still remote
usageCounts = topo.diskUsages.usages[types.HardDriveType]
assert(t, "activeVolumeCount after writable+remote", int(usageCounts.activeVolumeCount), 1)
assert(t, "remoteVolumeCount after writable+remote", int(usageCounts.remoteVolumeCount), 1)
// Change back to local AND read-only simultaneously
v.ReadOnly = true
v.RemoteStorageName = ""
v.RemoteStorageKey = ""
dn.UpdateVolumes([]storage.VolumeInfo{v})
// Check final counts
usageCounts = topo.diskUsages.usages[types.HardDriveType]
assert(t, "final activeVolumeCount", int(usageCounts.activeVolumeCount), 0)
assert(t, "final remoteVolumeCount", int(usageCounts.remoteVolumeCount), 0)
}
func TestListCollections(t *testing.T) {
rp, _ := super_block.NewReplicaPlacementFromString("002")

weed/topology/volume_growth.go (4 lines changed)

@@ -152,9 +152,9 @@ func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topolo
}
}()
for !topo.LastLeaderChangeTime.Add(constants.VolumePulseSeconds * 2).Before(time.Now()) {
for !topo.LastLeaderChangeTime.Add(constants.VolumePulsePeriod * 2).Before(time.Now()) {
glog.V(0).Infof("wait for volume servers to join back")
time.Sleep(constants.VolumePulseSeconds / 2)
time.Sleep(constants.VolumePulsePeriod / 2)
}
vid, raftErr := topo.NextVolumeId()
if raftErr != nil {
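A brief aside, sketched outside the codebase: since VolumePulsePeriod is a time.Duration, multiplying or dividing it by an untyped integer constant still yields a time.Duration, so the * 2 wait window and / 2 sleep above need no extra conversion.

package main

import (
	"fmt"
	"time"
)

// Mirrors the constant from weed/server/constants/volume.go.
const VolumePulsePeriod = 5 * time.Second

func main() {
	wait := VolumePulsePeriod * 2 // 10s: wait window after a leader change
	poll := VolumePulsePeriod / 2 // 2.5s: sleep between checks
	fmt.Println(wait, poll)       // prints: 10s 2.5s
}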
