Browse Source

feat: Phase 9A -- Kubernetes operator MVP for SeaweedFS block storage, 71 tests

Nested Go module (operator/go.mod) isolating controller-runtime deps.
CRD SeaweedBlockCluster (block.seaweedfs.com/v1alpha1) with dual-mode:
CSI-only (MasterRef) connects to existing cluster; full-stack (Master)
deploys master+volume StatefulSets. Single reconciler manages all
sub-resources with ownership labels, finalizer cleanup, CHAP secret
auto-generation, and multi-CR conflict detection.

Review fixes: cross-NS label ownership (H1), ParseQuantity validation (H2),
volume readiness probe (M1), leader election (M2), PVC StorageClassName (M3),
condition type separation (M4), FQDN master address (L1), port validation (L3).

QA adversarial fixes: ExtraArgs override rejection (BUG-QA-1), malformed
lastRotated infinite rotation (BUG-QA-2), DNS label length validation
(BUG-QA-3), replicas=0 error message (BUG-QA-4), RFC 1123 name validation
(BUG-QA-5), whitespace field trimming (BUG-QA-6), zero storage size (BUG-QA-7).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
feature/sw-block
Ping Qiu 1 day ago
parent
commit
8fa1829992
  1. 12
      weed/storage/blockvol/operator/Dockerfile
  2. 25
      weed/storage/blockvol/operator/Makefile
  3. 22
      weed/storage/blockvol/operator/api/v1alpha1/groupversion_info.go
  4. 233
      weed/storage/blockvol/operator/api/v1alpha1/types.go
  5. 270
      weed/storage/blockvol/operator/api/v1alpha1/zz_generated.deepcopy.go
  6. 161
      weed/storage/blockvol/operator/config/crd/bases/block.seaweedfs.com_seaweedblockclusters.yaml
  7. 50
      weed/storage/blockvol/operator/config/manager/manager.yaml
  8. 59
      weed/storage/blockvol/operator/config/rbac/role.yaml
  9. 18
      weed/storage/blockvol/operator/config/samples/csi-only.yaml
  10. 32
      weed/storage/blockvol/operator/config/samples/full-stack.yaml
  11. 68
      weed/storage/blockvol/operator/go.mod
  12. 194
      weed/storage/blockvol/operator/go.sum
  13. 78
      weed/storage/blockvol/operator/internal/controller/defaults.go
  14. 813
      weed/storage/blockvol/operator/internal/controller/qa_reconciler_test.go
  15. 657
      weed/storage/blockvol/operator/internal/controller/reconciler.go
  16. 970
      weed/storage/blockvol/operator/internal/controller/reconciler_test.go
  17. 177
      weed/storage/blockvol/operator/internal/controller/validate.go
  18. 110
      weed/storage/blockvol/operator/internal/resources/csi_controller.go
  19. 29
      weed/storage/blockvol/operator/internal/resources/csi_driver.go
  20. 156
      weed/storage/blockvol/operator/internal/resources/csi_node.go
  21. 88
      weed/storage/blockvol/operator/internal/resources/labels.go
  22. 168
      weed/storage/blockvol/operator/internal/resources/master.go
  23. 62
      weed/storage/blockvol/operator/internal/resources/ownership.go
  24. 110
      weed/storage/blockvol/operator/internal/resources/rbac.go
  25. 976
      weed/storage/blockvol/operator/internal/resources/resources_test.go
  26. 100
      weed/storage/blockvol/operator/internal/resources/secret.go
  27. 31
      weed/storage/blockvol/operator/internal/resources/storageclass.go
  28. 204
      weed/storage/blockvol/operator/internal/resources/volume.go
  29. 76
      weed/storage/blockvol/operator/main.go

12
weed/storage/blockvol/operator/Dockerfile

@@ -0,0 +1,12 @@
# Build stage: compile the operator as a fully static binary.
FROM golang:1.24 AS builder
WORKDIR /workspace
# Copy module manifests first so dependency downloads are cached
# independently of source-code changes.
COPY go.mod go.sum ./
RUN go mod download
COPY . .
# TARGETOS/TARGETARCH are populated automatically by `docker buildx` for
# multi-arch builds; the defaults preserve the original linux/amd64 output
# for plain `docker build`.
ARG TARGETOS=linux
ARG TARGETARCH=amd64
RUN CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o operator .

# Runtime stage: minimal distroless image, runs as non-root (uid 65532).
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/operator .
USER 65532:65532
ENTRYPOINT ["/operator"]

25
weed/storage/blockvol/operator/Makefile

@@ -0,0 +1,25 @@
IMG ?= sw-block-operator:local
CONTROLLER_GEN ?= $(shell which controller-gen 2>/dev/null)

# Every target is phony: none of them produces a file with the target's name.
# (fmt and vet were previously missing from .PHONY, so files named "fmt" or
# "vet" in this directory would silently suppress those targets.)
.PHONY: generate manifests build docker-build test fmt vet

# Regenerate zz_generated.deepcopy.go from the API types.
generate:
	$(CONTROLLER_GEN) object paths="./api/..."

# Regenerate the CRD YAML under config/crd/bases from the API types.
manifests:
	$(CONTROLLER_GEN) crd paths="./api/..." output:crd:artifacts:config=config/crd/bases

build:
	go build -o bin/operator .

docker-build:
	docker build -t $(IMG) .

# -count=1 disables Go's test result cache so tests always re-run.
test:
	go test ./... -v -count=1

fmt:
	go fmt ./...

vet:
	go vet ./...

22
weed/storage/blockvol/operator/api/v1alpha1/groupversion_info.go

@@ -0,0 +1,22 @@
// Package v1alpha1 contains API Schema definitions for block.seaweedfs.com v1alpha1.
package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"

	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "block.seaweedfs.com", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

// init registers the CR and its list type with the scheme builder so
// controller-runtime clients can encode/decode them.
func init() {
	SchemeBuilder.Register(&SeaweedBlockCluster{}, &SeaweedBlockClusterList{})
}

233
weed/storage/blockvol/operator/api/v1alpha1/types.go

@@ -0,0 +1,233 @@
// Package v1alpha1 contains API types for the SeaweedFS Block operator.
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
	// CSIDriverName is the well-known CSI driver registration name.
	CSIDriverName = "block.csi.seaweedfs.com"
	// FinalizerName is the finalizer used for cluster-scoped resource cleanup.
	FinalizerName = "block.seaweedfs.com/finalizer"
	// AnnotationRotateSecret triggers CHAP secret regeneration when set to an RFC 3339 timestamp.
	AnnotationRotateSecret = "block.seaweedfs.com/rotate-secret"
	// LabelOwnerNamespace identifies the owning CR namespace on cluster-scoped resources.
	LabelOwnerNamespace = "block.seaweedfs.com/owner-namespace"
	// LabelOwnerName identifies the owning CR name on cluster-scoped resources.
	LabelOwnerName = "block.seaweedfs.com/owner-name"
	// AnnotationLastRotated records the timestamp of the last secret rotation.
	AnnotationLastRotated = "block.seaweedfs.com/last-rotated"

	// Default values applied by the reconciler when the corresponding
	// spec field is left empty.
	DefaultImage            = "chrislusf/seaweedfs:latest"
	DefaultCSIImage         = "sw-block-csi:local"
	DefaultMasterPort       = 9333
	DefaultMasterGRPCPort   = 19333
	DefaultVolumePort       = 8080
	DefaultVolumeGRPCPort   = 18080
	DefaultBlockListenPort  = 3260
	DefaultBlockDir         = "/data1/block"
	DefaultStorageClassName = "sw-block"
	DefaultCSINamespace     = "kube-system"
	DefaultProvisionerImage = "registry.k8s.io/sig-storage/csi-provisioner:v5.1.0"
	DefaultRegistrarImage   = "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.12.0"
	DefaultControllerReplicas = 1
	DefaultMasterReplicas     = 1
	DefaultVolumeReplicas     = 1
	DefaultImagePullPolicy    = corev1.PullIfNotPresent
)
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=sbc
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Master",type=string,JSONPath=`.status.masterAddress`
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`

// SeaweedBlockCluster is the Schema for the seaweedBlockClusters API.
type SeaweedBlockCluster struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   SeaweedBlockClusterSpec   `json:"spec,omitempty"`
	Status SeaweedBlockClusterStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// SeaweedBlockClusterList contains a list of SeaweedBlockCluster.
type SeaweedBlockClusterList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []SeaweedBlockCluster `json:"items"`
}

// SeaweedBlockClusterSpec defines the desired state of SeaweedBlockCluster.
//
// Exactly one of Master (full-stack mode) or MasterRef (CSI-only mode) is
// expected to be set; the two fields are mutually exclusive.
type SeaweedBlockClusterSpec struct {
	// Image is the SeaweedFS image for master + volume servers.
	// +optional
	Image string `json:"image,omitempty"`
	// CSIImage is the block CSI driver image.
	// +optional
	CSIImage string `json:"csiImage,omitempty"`
	// Master configures a full-stack SeaweedFS master deployment.
	// Mutually exclusive with MasterRef.
	// +optional
	Master *MasterSpec `json:"master,omitempty"`
	// Volume configures a full-stack SeaweedFS volume deployment.
	// Requires Master to be set (full-stack mode).
	// +optional
	Volume *VolumeSpec `json:"volume,omitempty"`
	// MasterRef points to an existing external SeaweedFS master.
	// Mutually exclusive with Master.
	// +optional
	MasterRef *MasterRef `json:"masterRef,omitempty"`
	// CSI configures the CSI driver components.
	// +optional
	CSI *CSISpec `json:"csi,omitempty"`
	// Auth configures CHAP authentication.
	// +optional
	Auth *AuthSpec `json:"auth,omitempty"`
	// StorageClassName is the name of the StorageClass to create.
	// +optional
	StorageClassName string `json:"storageClassName,omitempty"`
	// CSINamespace is the namespace for CSI components (controller, node, SA).
	// +optional
	CSINamespace string `json:"csiNamespace,omitempty"`
	// AdoptExistingStorageClass allows the operator to adopt a pre-existing
	// StorageClass that was created outside this operator.
	// +optional
	AdoptExistingStorageClass bool `json:"adoptExistingStorageClass,omitempty"`
}
// MasterSpec configures the SeaweedFS master StatefulSet (full-stack mode).
type MasterSpec struct {
	// Replicas is the number of master replicas. Only 1 is supported in 9A.
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`
	// Port is the HTTP port for the master.
	// +optional
	Port int32 `json:"port,omitempty"`
	// GRPCPort is the gRPC port for the master.
	// +optional
	GRPCPort int32 `json:"grpcPort,omitempty"`
	// Storage configures the PVC for /data.
	// +optional
	Storage *StorageSpec `json:"storage,omitempty"`
	// Resources defines compute resources for the master container.
	// +optional
	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
	// ExtraArgs are additional command-line arguments.
	// +optional
	ExtraArgs []string `json:"extraArgs,omitempty"`
}

// VolumeSpec configures the SeaweedFS volume StatefulSet (full-stack mode).
type VolumeSpec struct {
	// Replicas is the number of volume server replicas.
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`
	// Port is the HTTP port.
	// +optional
	Port int32 `json:"port,omitempty"`
	// GRPCPort is the gRPC port.
	// +optional
	GRPCPort int32 `json:"grpcPort,omitempty"`
	// BlockDir is the directory for .blk block volume files.
	// +optional
	BlockDir string `json:"blockDir,omitempty"`
	// BlockListenPort is the iSCSI target listen port.
	// +optional
	BlockListenPort int32 `json:"blockListenPort,omitempty"`
	// PortalOverride overrides the auto-derived iSCSI portal address.
	// +optional
	PortalOverride string `json:"portalOverride,omitempty"`
	// Storage configures the PVC for /data1.
	// +optional
	Storage *StorageSpec `json:"storage,omitempty"`
	// Resources defines compute resources for the volume container.
	// +optional
	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
	// ExtraArgs are additional command-line arguments.
	// +optional
	ExtraArgs []string `json:"extraArgs,omitempty"`
}
// StorageSpec defines PVC storage configuration.
type StorageSpec struct {
	// Size is the requested storage size (e.g. "5Gi").
	// Parsed as a resource.Quantity by the reconciler.
	Size string `json:"size,omitempty"`
	// StorageClassName is the storage class for the PVC.
	// A nil pointer means "use the cluster default storage class".
	// +optional
	StorageClassName *string `json:"storageClassName,omitempty"`
}

// MasterRef points to an existing SeaweedFS master (CSI-only mode).
type MasterRef struct {
	// Address is the master address (e.g. "seaweedfs-master.default:9333").
	Address string `json:"address"`
}

// CSISpec configures CSI driver components.
type CSISpec struct {
	// ControllerReplicas is the number of CSI controller replicas.
	// +optional
	ControllerReplicas *int32 `json:"controllerReplicas,omitempty"`
	// ProvisionerImage is the CSI provisioner sidecar image.
	// +optional
	ProvisionerImage string `json:"provisionerImage,omitempty"`
	// RegistrarImage is the CSI node-driver-registrar sidecar image.
	// +optional
	RegistrarImage string `json:"registrarImage,omitempty"`
}

// AuthSpec configures CHAP authentication.
type AuthSpec struct {
	// SecretRef references a pre-existing CHAP secret.
	// If nil, the operator auto-generates one.
	// +optional
	SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"`
}
// SeaweedBlockClusterStatus defines the observed state of SeaweedBlockCluster.
type SeaweedBlockClusterStatus struct {
	// Phase is the high-level cluster state.
	// +optional
	Phase ClusterPhase `json:"phase,omitempty"`
	// MasterAddress is the resolved master address.
	// +optional
	MasterAddress string `json:"masterAddress,omitempty"`
	// Conditions represent the latest observations of the cluster's state.
	// Condition types are listed in the Condition* constants below.
	// +optional
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}

// ClusterPhase represents the lifecycle phase of the cluster.
// +kubebuilder:validation:Enum=Pending;Running;Failed
type ClusterPhase string

const (
	PhasePending ClusterPhase = "Pending"
	PhaseRunning ClusterPhase = "Running"
	PhaseFailed  ClusterPhase = "Failed"
)

// Condition types.
const (
	ConditionMasterReady      = "MasterReady"
	ConditionVolumeReady      = "VolumeReady"
	ConditionCSIReady         = "CSIReady"
	ConditionAuthConfigured   = "AuthConfigured"
	ConditionResourceConflict = "ResourceConflict"
	ConditionValidationFailed = "ValidationFailed"
)

270
weed/storage/blockvol/operator/api/v1alpha1/zz_generated.deepcopy.go

@@ -0,0 +1,270 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthSpec) DeepCopyInto(out *AuthSpec) {
	*out = *in
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(v1.LocalObjectReference)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSpec.
func (in *AuthSpec) DeepCopy() *AuthSpec {
	if in == nil {
		return nil
	}
	out := new(AuthSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSISpec) DeepCopyInto(out *CSISpec) {
	*out = *in
	if in.ControllerReplicas != nil {
		in, out := &in.ControllerReplicas, &out.ControllerReplicas
		*out = new(int32)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISpec.
func (in *CSISpec) DeepCopy() *CSISpec {
	if in == nil {
		return nil
	}
	out := new(CSISpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MasterRef) DeepCopyInto(out *MasterRef) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterRef.
func (in *MasterRef) DeepCopy() *MasterRef {
	if in == nil {
		return nil
	}
	out := new(MasterRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MasterSpec) DeepCopyInto(out *MasterSpec) {
	*out = *in
	if in.Replicas != nil {
		in, out := &in.Replicas, &out.Replicas
		*out = new(int32)
		**out = **in
	}
	if in.Storage != nil {
		in, out := &in.Storage, &out.Storage
		*out = new(StorageSpec)
		(*in).DeepCopyInto(*out)
	}
	in.Resources.DeepCopyInto(&out.Resources)
	if in.ExtraArgs != nil {
		in, out := &in.ExtraArgs, &out.ExtraArgs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterSpec.
func (in *MasterSpec) DeepCopy() *MasterSpec {
	if in == nil {
		return nil
	}
	out := new(MasterSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SeaweedBlockCluster) DeepCopyInto(out *SeaweedBlockCluster) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedBlockCluster.
func (in *SeaweedBlockCluster) DeepCopy() *SeaweedBlockCluster {
	if in == nil {
		return nil
	}
	out := new(SeaweedBlockCluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SeaweedBlockCluster) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SeaweedBlockClusterList) DeepCopyInto(out *SeaweedBlockClusterList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]SeaweedBlockCluster, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedBlockClusterList.
func (in *SeaweedBlockClusterList) DeepCopy() *SeaweedBlockClusterList {
	if in == nil {
		return nil
	}
	out := new(SeaweedBlockClusterList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SeaweedBlockClusterList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SeaweedBlockClusterSpec) DeepCopyInto(out *SeaweedBlockClusterSpec) {
	*out = *in
	if in.Master != nil {
		in, out := &in.Master, &out.Master
		*out = new(MasterSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.Volume != nil {
		in, out := &in.Volume, &out.Volume
		*out = new(VolumeSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.MasterRef != nil {
		in, out := &in.MasterRef, &out.MasterRef
		*out = new(MasterRef)
		**out = **in
	}
	if in.CSI != nil {
		in, out := &in.CSI, &out.CSI
		*out = new(CSISpec)
		(*in).DeepCopyInto(*out)
	}
	if in.Auth != nil {
		in, out := &in.Auth, &out.Auth
		*out = new(AuthSpec)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedBlockClusterSpec.
func (in *SeaweedBlockClusterSpec) DeepCopy() *SeaweedBlockClusterSpec {
	if in == nil {
		return nil
	}
	out := new(SeaweedBlockClusterSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SeaweedBlockClusterStatus) DeepCopyInto(out *SeaweedBlockClusterStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeaweedBlockClusterStatus.
func (in *SeaweedBlockClusterStatus) DeepCopy() *SeaweedBlockClusterStatus {
	if in == nil {
		return nil
	}
	out := new(SeaweedBlockClusterStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageSpec) DeepCopyInto(out *StorageSpec) {
	*out = *in
	if in.StorageClassName != nil {
		in, out := &in.StorageClassName, &out.StorageClassName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec.
func (in *StorageSpec) DeepCopy() *StorageSpec {
	if in == nil {
		return nil
	}
	out := new(StorageSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) {
	*out = *in
	if in.Replicas != nil {
		in, out := &in.Replicas, &out.Replicas
		*out = new(int32)
		**out = **in
	}
	if in.Storage != nil {
		in, out := &in.Storage, &out.Storage
		*out = new(StorageSpec)
		(*in).DeepCopyInto(*out)
	}
	in.Resources.DeepCopyInto(&out.Resources)
	if in.ExtraArgs != nil {
		in, out := &in.ExtraArgs, &out.ExtraArgs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSpec.
func (in *VolumeSpec) DeepCopy() *VolumeSpec {
	if in == nil {
		return nil
	}
	out := new(VolumeSpec)
	in.DeepCopyInto(out)
	return out
}

161
weed/storage/blockvol/operator/config/crd/bases/block.seaweedfs.com_seaweedblockclusters.yaml

@@ -0,0 +1,161 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: seaweedblockclusters.block.seaweedfs.com
spec:
  group: block.seaweedfs.com
  names:
    kind: SeaweedBlockCluster
    listKind: SeaweedBlockClusterList
    plural: seaweedblockclusters
    singular: seaweedblockcluster
    shortNames:
      - sbc
  scope: Namespaced
  versions:
    - name: v1alpha1
      served: true
      storage: true
      additionalPrinterColumns:
        - name: Phase
          type: string
          jsonPath: .status.phase
        - name: Master
          type: string
          jsonPath: .status.masterAddress
        - name: Age
          type: date
          jsonPath: .metadata.creationTimestamp
      subresources:
        status: {}
      schema:
        openAPIV3Schema:
          type: object
          properties:
            apiVersion:
              type: string
            kind:
              type: string
            metadata:
              type: object
            spec:
              type: object
              properties:
                image:
                  type: string
                csiImage:
                  type: string
                master:
                  type: object
                  properties:
                    replicas:
                      type: integer
                      format: int32
                    port:
                      type: integer
                      format: int32
                    grpcPort:
                      type: integer
                      format: int32
                    storage:
                      type: object
                      properties:
                        size:
                          type: string
                        storageClassName:
                          type: string
                    resources:
                      type: object
                      x-kubernetes-preserve-unknown-fields: true
                    extraArgs:
                      type: array
                      items:
                        type: string
                volume:
                  type: object
                  properties:
                    replicas:
                      type: integer
                      format: int32
                    port:
                      type: integer
                      format: int32
                    grpcPort:
                      type: integer
                      format: int32
                    blockDir:
                      type: string
                    blockListenPort:
                      type: integer
                      format: int32
                    portalOverride:
                      type: string
                    storage:
                      type: object
                      properties:
                        size:
                          type: string
                        storageClassName:
                          type: string
                    resources:
                      type: object
                      x-kubernetes-preserve-unknown-fields: true
                    extraArgs:
                      type: array
                      items:
                        type: string
                masterRef:
                  type: object
                  required: ["address"]
                  properties:
                    address:
                      type: string
                csi:
                  type: object
                  properties:
                    controllerReplicas:
                      type: integer
                      format: int32
                    provisionerImage:
                      type: string
                    registrarImage:
                      type: string
                auth:
                  type: object
                  properties:
                    secretRef:
                      type: object
                      properties:
                        name:
                          type: string
                storageClassName:
                  type: string
                csiNamespace:
                  type: string
                adoptExistingStorageClass:
                  type: boolean
            status:
              type: object
              properties:
                phase:
                  type: string
                  enum: ["Pending", "Running", "Failed"]
                masterAddress:
                  type: string
                conditions:
                  type: array
                  items:
                    type: object
                    properties:
                      type:
                        type: string
                      status:
                        type: string
                      reason:
                        type: string
                      message:
                        type: string
                      lastTransitionTime:
                        type: string
                        format: date-time
                    required: ["type", "status"]

50
weed/storage/blockvol/operator/config/manager/manager.yaml

@@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sw-block-operator
  namespace: sw-block-system
  labels:
    app.kubernetes.io/name: sw-block-operator
    app.kubernetes.io/managed-by: kustomize
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: sw-block-operator
  template:
    metadata:
      labels:
        app.kubernetes.io/name: sw-block-operator
    spec:
      serviceAccountName: sw-block-operator
      containers:
        - name: operator
          image: sw-block-operator:local
          imagePullPolicy: IfNotPresent
          args:
            - --health-probe-bind-address=:8081
            - --metrics-bind-address=:8080
          ports:
            - name: metrics
              containerPort: 8080
            - name: health
              containerPort: 8081
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8081
            initialDelaySeconds: 15
            periodSeconds: 20
          readinessProbe:
            httpGet:
              path: /readyz
              port: 8081
            initialDelaySeconds: 5
            periodSeconds: 10
          resources:
            limits:
              cpu: 500m
              memory: 128Mi
            requests:
              cpu: 10m
              memory: 64Mi

59
weed/storage/blockvol/operator/config/rbac/role.yaml

@@ -0,0 +1,59 @@
apiVersion: v1
kind: Namespace
metadata:
  name: sw-block-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: sw-block-operator
  namespace: sw-block-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: sw-block-operator
rules:
  # SeaweedBlockCluster CR
  - apiGroups: ["block.seaweedfs.com"]
    resources: ["seaweedblockclusters"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["block.seaweedfs.com"]
    resources: ["seaweedblockclusters/status"]
    verbs: ["get", "update", "patch"]
  - apiGroups: ["block.seaweedfs.com"]
    resources: ["seaweedblockclusters/finalizers"]
    verbs: ["update"]
  # Managed workloads
  - apiGroups: ["apps"]
    resources: ["deployments", "daemonsets", "statefulsets"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  # Core resources
  - apiGroups: [""]
    resources: ["services", "serviceaccounts", "secrets"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  # Cluster-scoped RBAC
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["clusterroles", "clusterrolebindings"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  # Storage
  - apiGroups: ["storage.k8s.io"]
    resources: ["csidrivers", "storageclasses"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  # Leader election: controller-runtime's default resource lock is a
  # coordination.k8s.io Lease; without these verbs the manager cannot
  # acquire leadership and exits at startup.
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  # Events (for recorder)
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: sw-block-operator
subjects:
  - kind: ServiceAccount
    name: sw-block-operator
    namespace: sw-block-system
roleRef:
  kind: ClusterRole
  name: sw-block-operator
  apiGroup: rbac.authorization.k8s.io

18
weed/storage/blockvol/operator/config/samples/csi-only.yaml

@@ -0,0 +1,18 @@
apiVersion: block.seaweedfs.com/v1alpha1
kind: SeaweedBlockCluster
metadata:
  name: my-block-csi
spec:
  csiImage: sw-block-csi:local
  # CSI-only mode: connect to an existing SeaweedFS cluster
  masterRef:
    address: "seaweedfs-master.default:9333"
  csi:
    controllerReplicas: 1
    provisionerImage: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0
    registrarImage: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.12.0
  storageClassName: sw-block
  csiNamespace: kube-system

32
weed/storage/blockvol/operator/config/samples/full-stack.yaml

@@ -0,0 +1,32 @@
apiVersion: block.seaweedfs.com/v1alpha1
kind: SeaweedBlockCluster
metadata:
  name: my-block
spec:
  image: chrislusf/seaweedfs:latest
  csiImage: sw-block-csi:local
  # Full-stack mode: operator deploys master + volume + CSI
  master:
    replicas: 1
    port: 9333
    grpcPort: 19333
    storage:
      size: "5Gi"
  volume:
    replicas: 1
    port: 8080
    grpcPort: 18080
    blockDir: /data1/block
    blockListenPort: 3260
    storage:
      size: "50Gi"
  csi:
    controllerReplicas: 1
    provisionerImage: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0
    registrarImage: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.12.0
  storageClassName: sw-block
  csiNamespace: kube-system

68
weed/storage/blockvol/operator/go.mod

@@ -0,0 +1,68 @@
// Nested Go module: isolates controller-runtime and its transitive
// dependency tree from the main SeaweedFS module.
module github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator

go 1.24.9

require (
	k8s.io/api v0.31.4
	k8s.io/apimachinery v0.31.4
	k8s.io/client-go v0.31.4
	sigs.k8s.io/controller-runtime v0.19.4
)

// Indirect dependencies below are managed by `go mod tidy`.
require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/zapr v1.3.0 // indirect
	github.com/go-openapi/jsonpointer v0.19.6 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.22.4 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/google/gnostic-models v0.6.8 // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/imdario/mergo v0.3.6 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/prometheus/client_golang v1.19.1 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.55.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.26.0 // indirect
	golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
	golang.org/x/net v0.26.0 // indirect
	golang.org/x/oauth2 v0.21.0 // indirect
	golang.org/x/sys v0.21.0 // indirect
	golang.org/x/term v0.21.0 // indirect
	golang.org/x/text v0.16.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
	google.golang.org/protobuf v1.34.2 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/apiextensions-apiserver v0.31.0 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
)

194
weed/storage/blockvol/operator/go.sum

@ -0,0 +1,194 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.31.4 h1:I2QNzitPVsPeLQvexMEsj945QumYraqv9m74isPDKhM=
k8s.io/api v0.31.4/go.mod h1:d+7vgXLvmcdT1BCo79VEgJxHHryww3V5np2OYTr6jdw=
k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk=
k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk=
k8s.io/apimachinery v0.31.4 h1:8xjE2C4CzhYVm9DGf60yohpNUh5AEBnPxCryPBECmlM=
k8s.io/apimachinery v0.31.4/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/client-go v0.31.4 h1:t4QEXt4jgHIkKKlx06+W3+1JOwAFU/2OPiOo7H92eRQ=
k8s.io/client-go v0.31.4/go.mod h1:kvuMro4sFYIa8sulL5Gi5GFqUPvfH2O/dXuKstbaaeg=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo=
sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

78
weed/storage/blockvol/operator/internal/controller/defaults.go

@ -0,0 +1,78 @@
package controller
import (
"strings"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// applyDefaults fills zero-value fields of spec with operator defaults.
// BUG-QA-6 fix: TrimSpace before comparing, so whitespace-only strings are
// treated as unset and replaced with the default.
//
// The spec is mutated in place; callers pass the spec of the CR currently
// being reconciled.
func applyDefaults(spec *blockv1alpha1.SeaweedBlockClusterSpec) {
	// defaultStr trims s and falls back to def when the trimmed value is empty.
	defaultStr := func(s, def string) string {
		if t := strings.TrimSpace(s); t != "" {
			return t
		}
		return def
	}

	spec.Image = defaultStr(spec.Image, blockv1alpha1.DefaultImage)
	spec.CSIImage = defaultStr(spec.CSIImage, blockv1alpha1.DefaultCSIImage)
	spec.StorageClassName = defaultStr(spec.StorageClassName, blockv1alpha1.DefaultStorageClassName)
	spec.CSINamespace = defaultStr(spec.CSINamespace, blockv1alpha1.DefaultCSINamespace)

	// CSI defaults: always ensure a CSI section exists.
	if spec.CSI == nil {
		spec.CSI = &blockv1alpha1.CSISpec{}
	}
	if spec.CSI.ControllerReplicas == nil {
		r := int32(blockv1alpha1.DefaultControllerReplicas)
		spec.CSI.ControllerReplicas = &r
	}
	// Consistency fix: trim the sidecar image fields as well. Previously only
	// an exactly-empty string fell back to the default, so a whitespace-only
	// value bypassed the BUG-QA-6 trimming applied to every other field above.
	spec.CSI.ProvisionerImage = defaultStr(spec.CSI.ProvisionerImage, blockv1alpha1.DefaultProvisionerImage)
	spec.CSI.RegistrarImage = defaultStr(spec.CSI.RegistrarImage, blockv1alpha1.DefaultRegistrarImage)

	// Master defaults (full-stack mode only).
	if spec.Master != nil {
		if spec.Master.Replicas == nil {
			r := int32(blockv1alpha1.DefaultMasterReplicas)
			spec.Master.Replicas = &r
		}
		if spec.Master.Port == 0 {
			spec.Master.Port = blockv1alpha1.DefaultMasterPort
		}
		if spec.Master.GRPCPort == 0 {
			spec.Master.GRPCPort = blockv1alpha1.DefaultMasterGRPCPort
		}
	}

	// Volume defaults (full-stack mode only).
	if spec.Volume != nil {
		if spec.Volume.Replicas == nil {
			r := int32(blockv1alpha1.DefaultVolumeReplicas)
			spec.Volume.Replicas = &r
		}
		if spec.Volume.Port == 0 {
			spec.Volume.Port = blockv1alpha1.DefaultVolumePort
		}
		if spec.Volume.GRPCPort == 0 {
			spec.Volume.GRPCPort = blockv1alpha1.DefaultVolumeGRPCPort
		}
		spec.Volume.BlockDir = defaultStr(spec.Volume.BlockDir, blockv1alpha1.DefaultBlockDir)
		if spec.Volume.BlockListenPort == 0 {
			spec.Volume.BlockListenPort = blockv1alpha1.DefaultBlockListenPort
		}
	}
}

813
weed/storage/blockvol/operator/internal/controller/qa_reconciler_test.go

@ -0,0 +1,813 @@
package controller
// Adversarial tests for the SeaweedBlockCluster reconciler.
// Target: edge cases, race-like scenarios, input fuzzing, state transitions.
import (
"context"
"strings"
"testing"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/internal/resources"
)
// fullStackClusterWithVolume returns a SeaweedBlockCluster CR fixture in
// full-stack mode, i.e. with both the Master and Volume sections populated.
func fullStackClusterWithVolume() *blockv1alpha1.SeaweedBlockCluster {
	one := int32(1)
	master := &blockv1alpha1.MasterSpec{
		Replicas: &one,
		Port:     9333,
		GRPCPort: 19333,
		Storage:  &blockv1alpha1.StorageSpec{Size: "5Gi"},
	}
	volume := &blockv1alpha1.VolumeSpec{
		Replicas:        &one,
		Port:            8080,
		GRPCPort:        18080,
		BlockDir:        "/data1/block",
		BlockListenPort: 3260,
		Storage:         &blockv1alpha1.StorageSpec{Size: "50Gi"},
	}
	return &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-full",
			Namespace: "default",
			UID:       "uid-fs",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{
			Image:            "chrislusf/seaweedfs:latest",
			CSIImage:         "sw-block-csi:local",
			Master:           master,
			Volume:           volume,
			StorageClassName: "sw-block-full",
			CSINamespace:     "kube-system",
		},
	}
}
// =============================================================================
// QA-1: Cross-namespace resource conflict not detected.
//
// createOrUpdateCrossNamespace doesn't check ownership labels. If CR-A already
// created a CSI controller Deployment in kube-system, CR-B should not silently
// overwrite it.
// =============================================================================
func TestQA_CrossNamespace_ConflictNotDetected(t *testing.T) {
	scheme := testScheme()
	// CR-A creates resources first.
	clusterA := csiOnlyCluster()
	clusterA.Name = "block-a"
	clusterA.UID = "uid-a"
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(clusterA).
		WithStatusSubresource(clusterA).
		Build()
	rA := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, rA, "block-a", "default") // finalizer
	reconcile(t, rA, "block-a", "default") // create resources
	ctx := context.Background()
	// Verify A owns the CSI controller.
	var dep appsv1.Deployment
	if err := c.Get(ctx, types.NamespacedName{Name: "block-a-csi-controller", Namespace: "kube-system"}, &dep); err != nil {
		t.Fatalf("CR-A CSI controller should exist: %v", err)
	}
	if dep.Labels[blockv1alpha1.LabelOwnerName] != "block-a" {
		t.Fatalf("CSI controller should be owned by block-a, got %q", dep.Labels[blockv1alpha1.LabelOwnerName])
	}
	// CR-B tries to reconcile — its CSI controller has a DIFFERENT name (block-b-csi-controller)
	// so there's no direct conflict on CSI controller Deployment names. But the shared
	// cluster-scoped resources (CSIDriver, ClusterRole, CRB) SHOULD conflict.
	clusterB := csiOnlyCluster()
	clusterB.Name = "block-b"
	clusterB.UID = "uid-b"
	if err := c.Create(ctx, clusterB); err != nil {
		t.Fatal(err)
	}
	// Fix (staticcheck SA9003): the error here was previously swallowed in an
	// empty branch. The call is best-effort — the fake client may need explicit
	// status-subresource registration for clusterB — so log instead of failing.
	if err := c.Status().Update(ctx, clusterB); err != nil {
		t.Logf("best-effort status update for block-b: %v", err)
	}
	rB := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, rB, "block-b", "default") // finalizer
	reconcile(t, rB, "block-b", "default") // attempt reconcile
	// CR-B should fail because the cluster-scoped CSIDriver is owned by CR-A.
	var updatedB blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(ctx, types.NamespacedName{Name: "block-b", Namespace: "default"}, &updatedB); err != nil {
		t.Fatal(err)
	}
	if updatedB.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Errorf("BUG: CR-B phase = %q, want Failed (cluster-scoped conflict with CR-A)", updatedB.Status.Phase)
	}
}
// =============================================================================
// QA-2: ExtraArgs can override operator-managed flags.
//
// Because args are appended, ExtraArgs=["-port=1234"] would give the volume
// server both "-port=8080" and "-port=1234"; SeaweedFS flag parsing takes the
// last value, letting a user silently override operator-managed ports and
// break the readiness probe that targets the default port.
// =============================================================================
func TestQA_ExtraArgs_OverridesOperatorFlags(t *testing.T) {
	cluster := fullStackClusterWithVolume()
	cluster.Spec.Volume.ExtraArgs = []string{"-port=1234"}
	// BUG-QA-1 fix: validation must reject ExtraArgs that shadow an
	// operator-managed flag.
	switch err := validate(&cluster.Spec); {
	case err == nil:
		t.Error("BUG: -port=1234 in volume ExtraArgs should be rejected by validation")
	case !strings.Contains(err.Error(), "conflicts with operator-managed flag"):
		t.Errorf("unexpected error: %v", err)
	}
}
// TestQA_ExtraArgs_OverridesOperatorFlags_Master is the master-spec variant of
// QA-2: ExtraArgs attempting to override -port and -mdir must be rejected.
func TestQA_ExtraArgs_OverridesOperatorFlags_Master(t *testing.T) {
	cluster := fullStackClusterWithVolume()
	cluster.Spec.Master.ExtraArgs = []string{"-port=5555", "-mdir=/evil"}
	// BUG-QA-1 fix: validation must reject ExtraArgs that shadow an
	// operator-managed flag.
	switch err := validate(&cluster.Spec); {
	case err == nil:
		t.Error("BUG: -port=5555 in master ExtraArgs should be rejected by validation")
	case !strings.Contains(err.Error(), "conflicts with operator-managed flag"):
		t.Errorf("unexpected error: %v", err)
	}
}
// =============================================================================
// QA-3: Malformed rotation timestamp asymmetry.
//
// NeedsRotation treats the two annotations differently when unparseable: a bad
// rotateTS skips rotation, but a bad lastRotated would FORCE rotation — and
// would do so on every reconcile, i.e. infinitely.
// =============================================================================
func TestQA_RotationTimestamp_MalformedLastRotated_ForcesInfiniteRotation(t *testing.T) {
	cluster := csiOnlyCluster()
	cluster.Annotations = map[string]string{
		blockv1alpha1.AnnotationRotateSecret: "2025-01-01T00:00:00Z",
	}
	badSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{
				blockv1alpha1.AnnotationLastRotated: "not-a-valid-timestamp",
			},
		},
	}
	rotate := resources.NeedsRotation(cluster, badSecret)
	if rotate {
		t.Error("BUG: malformed lastRotated forces rotation. " +
			"If someone manually sets an invalid annotation, every reconcile will regenerate the CHAP password, " +
			"breaking all existing iSCSI sessions. Should return false for safety.")
	}
}
// TestQA_RotationTimestamp_MalformedRotateSecret_Skips verifies the correct
// half of the QA-3 asymmetry: an unparseable rotateSecret annotation must be
// ignored rather than acted on.
func TestQA_RotationTimestamp_MalformedRotateSecret_Skips(t *testing.T) {
	cluster := csiOnlyCluster()
	cluster.Annotations = map[string]string{
		blockv1alpha1.AnnotationRotateSecret: "garbage",
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{
				blockv1alpha1.AnnotationLastRotated: "2025-01-01T00:00:00Z",
			},
		},
	}
	// Correct behavior: unparseable request → skip rotation.
	if got := resources.NeedsRotation(cluster, secret); got {
		t.Error("malformed rotateSecret should skip rotation")
	}
}
// =============================================================================
// QA-4: Long CR name exceeds K8s name limits.
//
// K8s resource names must be <= 253 chars, and DNS labels (Services, pods in
// StatefulSets) must be <= 63 chars. Derived names append suffixes such as
// "-csi-controller", so a long CR name can produce invalid resource names.
// =============================================================================
func TestQA_LongCRName_ExceedsDNSLabelLimit(t *testing.T) {
	// A 50-char name plus the longest operator-added suffix overflows the
	// 63-char DNS label limit.
	tooLong := strings.Repeat("a", 50)
	// BUG-QA-3 fix: validateName() rejects names whose derived resource names
	// would exceed 63 characters.
	if err := validateName(tooLong); err == nil {
		t.Error("BUG: 50-char CR name should be rejected (derived names exceed 63 chars)")
	} else if !strings.Contains(err.Error(), "too long") {
		t.Errorf("unexpected error: %v", err)
	}
	// The longest still-legal name (maxCRNameLength) must be accepted.
	boundary := strings.Repeat("a", maxCRNameLength)
	if err := validateName(boundary); err != nil {
		t.Errorf("name of %d chars should be valid: %v", maxCRNameLength, err)
	}
}
// TestQA_LongCRName_StatefulSetNames checks that the master and volume
// StatefulSets built from a long CR name still yield pod names
// (<sts-name>-<ordinal>) that fit inside a 63-char DNS label.
func TestQA_LongCRName_StatefulSetNames(t *testing.T) {
	one := int32(1)
	cluster := &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      strings.Repeat("b", 50),
			Namespace: "default",
			UID:       "uid-long",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{
			Master: &blockv1alpha1.MasterSpec{
				Replicas: &one,
				Storage:  &blockv1alpha1.StorageSpec{Size: "1Gi"},
			},
			Volume: &blockv1alpha1.VolumeSpec{
				Storage: &blockv1alpha1.StorageSpec{Size: "10Gi"},
			},
		},
	}
	applyDefaults(&cluster.Spec)

	// Ordinal 0 is the shortest pod name; if it doesn't fit, none do.
	masterPod := resources.BuildMasterStatefulSet(cluster).Name + "-0"
	if len(masterPod) > 63 {
		t.Errorf("BUG: master StatefulSet pod name %q is %d chars, exceeds DNS label limit",
			masterPod, len(masterPod))
	}
	volumePod := resources.BuildVolumeStatefulSet(cluster, "master:9333").Name + "-0"
	if len(volumePod) > 63 {
		t.Errorf("BUG: volume StatefulSet pod name %q is %d chars, exceeds DNS label limit",
			volumePod, len(volumePod))
	}
}
// =============================================================================
// QA-5: Replicas=0 gets misleading error message.
//
// validate() rejects replicas != 1 with "HA deferred to Phase 9C", but
// replicas=0 is not an HA request — it's a mistake or a scale-to-zero attempt,
// so pointing the user at the HA roadmap is misleading.
// =============================================================================
func TestQA_MasterReplicas_Zero_MisleadingError(t *testing.T) {
	zero := int32(0)
	spec := &blockv1alpha1.SeaweedBlockClusterSpec{
		Master: &blockv1alpha1.MasterSpec{Replicas: &zero},
	}
	err := validate(spec)
	if err == nil {
		t.Fatal("replicas=0 should be rejected")
	}
	msg := err.Error()
	// The rejection must not reference HA or the Phase 9C roadmap.
	if strings.Contains(msg, "HA") || strings.Contains(msg, "9C") {
		t.Errorf("BUG: replicas=0 error message %q mentions HA/Phase 9C, "+
			"but zero replicas is not an HA request — it's invalid input. "+
			"Error should say 'replicas must be 1' or 'replicas must be >= 1'.",
			msg)
	}
}
// =============================================================================
// QA-6: Condition cleanup after Failed→Running transition.
//
// When a spec goes from invalid to valid, stale failure conditions
// (ResourceConflict, ValidationFailed) must be removed. Exercised by failing
// first, then fixing the spec.
// =============================================================================
func TestQA_ConditionCleanup_FailedToRunning(t *testing.T) {
	// Invalid spec: neither Master nor MasterRef is set.
	cluster := &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "flip-flop", Namespace: "default", UID: "uid-flip",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{},
	}
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}

	// First pass adds the finalizer, second pass runs validation and fails.
	reconcile(t, r, "flip-flop", "default")
	reconcile(t, r, "flip-flop", "default")

	ctx := context.Background()
	key := types.NamespacedName{Name: "flip-flop", Namespace: "default"}
	var updated blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(ctx, key, &updated); err != nil {
		t.Fatal(err)
	}
	if updated.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Fatalf("initial phase = %q, want Failed", updated.Status.Phase)
	}

	// The failed reconcile must have recorded at least one failure condition.
	hasFailureCond := false
	for _, cond := range updated.Status.Conditions {
		if cond.Type == blockv1alpha1.ConditionValidationFailed || cond.Type == blockv1alpha1.ConditionResourceConflict {
			hasFailureCond = true
		}
	}
	if !hasFailureCond {
		t.Fatal("should have at least one failure condition")
	}

	// Repair the spec so validation passes.
	updated.Spec.MasterRef = &blockv1alpha1.MasterRef{Address: "master:9333"}
	if err := c.Update(ctx, &updated); err != nil {
		t.Fatal(err)
	}
	reconcile(t, r, "flip-flop", "default")

	// Stale failure conditions must now be gone.
	if err := c.Get(ctx, key, &updated); err != nil {
		t.Fatal(err)
	}
	for _, cond := range updated.Status.Conditions {
		if cond.Type == blockv1alpha1.ConditionValidationFailed {
			t.Error("BUG: ValidationFailed condition not cleaned up after spec became valid")
		}
		if cond.Type == blockv1alpha1.ConditionResourceConflict {
			t.Error("BUG: ResourceConflict condition not cleaned up after spec became valid")
		}
	}
}
// =============================================================================
// QA-7: Condition duplication under repeated reconciles.
//
// Multiple reconciles should never produce duplicate conditions.
// =============================================================================
func TestQA_ConditionDeduplication(t *testing.T) {
	cluster := csiOnlyCluster()
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	// Run many reconciles — condition updates must happen in place, not append.
	for i := 0; i < 10; i++ {
		reconcile(t, r, "test-block", "default")
	}
	ctx := context.Background()
	var updated blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block", Namespace: "default"}, &updated); err != nil {
		t.Fatal(err)
	}
	// Count occurrences per condition type first, then report each duplicated
	// type exactly once. (The previous in-loop check emitted one error per
	// *extra* occurrence, flooding output for heavily duplicated types.)
	seen := make(map[string]int, len(updated.Status.Conditions))
	for _, cond := range updated.Status.Conditions {
		seen[cond.Type]++
	}
	for condType, count := range seen {
		if count > 1 {
			t.Errorf("BUG: duplicate condition type %q (%d occurrences)",
				condType, count)
		}
	}
}
// =============================================================================
// QA-8: CR name with dots/underscores could create invalid resource names.
//
// K8s names must match [a-z0-9]([a-z0-9.-]*[a-z0-9])? but some contexts
// are stricter. The operator blindly appends suffixes without sanitizing.
// =============================================================================
func TestQA_CRName_SpecialCharacters(t *testing.T) {
	// BUG-QA-5 fix: validateName() now checks RFC 1123 DNS label format.
	// Only lowercase alphanumeric + hyphens, must start/end with alphanumeric.
	for _, tc := range []struct {
		name    string
		wantErr bool
	}{
		{name: "my-block", wantErr: false},        // valid
		{name: "my--block", wantErr: false},       // consecutive hyphens are valid in DNS labels
		{name: "-leading-hyphen", wantErr: true},  // invalid: leading hyphen
		{name: "trailing-hyphen-", wantErr: true}, // invalid: trailing hyphen
		{name: "my.block", wantErr: true},         // dots are not valid in DNS labels (only in DNS names)
	} {
		t.Run(tc.name, func(t *testing.T) {
			err := validateName(tc.name)
			switch {
			case tc.wantErr && err == nil:
				t.Errorf("name %q should be rejected by validateName()", tc.name)
			case !tc.wantErr && err != nil:
				t.Errorf("name %q should be valid, got error: %v", tc.name, err)
			}
		})
	}
}
// =============================================================================
// QA-9: StorageClass adoption then deletion doesn't clean up adopted SC.
//
// If we adopt an existing StorageClass, then delete the CR, the finalizer
// should also delete the adopted StorageClass (it now has our labels).
// =============================================================================
func TestQA_AdoptedStorageClass_CleanedUpOnDeletion(t *testing.T) {
	// Pre-existing StorageClass without owner labels — an "orphan" from the
	// reconciler's point of view until adoption is explicitly opted into.
	existingSC := &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "sw-block",
		},
		Provisioner: blockv1alpha1.CSIDriverName,
	}
	cluster := csiOnlyCluster()
	cluster.Spec.AdoptExistingStorageClass = true
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster, existingSC).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "test-block", "default") // finalizer
	reconcile(t, r, "test-block", "default") // adopt SC
	ctx := context.Background()
	// Verify SC is now owned — adoption stamps the ownership labels.
	var sc storagev1.StorageClass
	if err := c.Get(ctx, types.NamespacedName{Name: "sw-block"}, &sc); err != nil {
		t.Fatal(err)
	}
	if sc.Labels[blockv1alpha1.LabelOwnerName] != "test-block" {
		t.Fatal("SC should be adopted")
	}
	// Now delete the CR — call cleanup directly. (This bypasses the full
	// deletion flow; only the finalizer's cleanup behavior is under test.)
	var latest blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block", Namespace: "default"}, &latest); err != nil {
		t.Fatal(err)
	}
	if err := r.cleanupOwnedResources(ctx, &latest); err != nil {
		t.Fatal(err)
	}
	// The adopted StorageClass should be cleaned up — it now carries our
	// ownership labels, so cleanup treats it like any other owned resource.
	err := c.Get(ctx, types.NamespacedName{Name: "sw-block"}, &sc)
	if !apierrors.IsNotFound(err) {
		t.Error("BUG: adopted StorageClass should be deleted during cleanup, but it still exists")
	}
}
// =============================================================================
// QA-10: Reconcile with empty string fields that bypass defaults.
//
// Spec fields like Image, CSIImage are checked for "" in applyDefaults.
// But what about whitespace-only values like " "? They wouldn't match ""
// and would create invalid K8s resources.
// =============================================================================
func TestQA_WhitespaceFields_BypassDefaults(t *testing.T) {
	cluster := csiOnlyCluster()
	cluster.Spec.Image = " "     // whitespace only
	cluster.Spec.CSIImage = "\t" // tab only
	applyDefaults(&cluster.Spec)
	// BUG-QA-6 fix: applyDefaults trims whitespace, so whitespace-only input
	// must come back as the actual default image. Asserting equality with the
	// default is strictly stronger than the previous "not still whitespace"
	// check, which would also have passed if defaulting substituted some
	// arbitrary non-blank value instead of the real default.
	if cluster.Spec.Image != blockv1alpha1.DefaultImage {
		t.Errorf("BUG: whitespace-only Image %q bypasses defaults. "+
			"applyDefaults should trim or check for whitespace.", cluster.Spec.Image)
	}
	if cluster.Spec.CSIImage != blockv1alpha1.DefaultCSIImage {
		t.Errorf("BUG: whitespace-only CSIImage %q bypasses defaults", cluster.Spec.CSIImage)
	}
}
// =============================================================================
// QA-11: Full-stack without Volume spec auto-creates Volume, but validate()
// has Volume port validation that could trigger on the auto-created Volume.
// =============================================================================
func TestQA_FullStackAutoVolume_DefaultsAppliedBeforeValidation(t *testing.T) {
	one := int32(1)
	// Master only, no Volume spec — the reconciler must auto-create the
	// VolumeSpec and apply defaults to it before validating.
	cr := &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "auto-vol", Namespace: "default", UID: "uid-av",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{
			Master: &blockv1alpha1.MasterSpec{
				Replicas: &one,
			},
		},
	}
	scheme := testScheme()
	cl := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cr).
		WithStatusSubresource(cr).
		Build()
	r := &Reconciler{Client: cl, Scheme: scheme}
	// Neither pass may panic or fail — the auto-created Volume gets defaults.
	reconcile(t, r, "auto-vol", "default") // finalizer
	reconcile(t, r, "auto-vol", "default") // create resources
	// The auto-created Volume must have materialized as a StatefulSet.
	var volSts appsv1.StatefulSet
	key := types.NamespacedName{Name: "auto-vol-volume", Namespace: "default"}
	if err := cl.Get(context.Background(), key, &volSts); err != nil {
		t.Errorf("auto-created Volume StatefulSet should exist: %v", err)
	}
}
// =============================================================================
// QA-12: Multiple rapid reconciles of the same CR — no resource version conflicts.
//
// Simulate rapid reconciles to check for ResourceVersion-related issues.
// =============================================================================
func TestQA_RapidReconcile_NoResourceVersionConflict(t *testing.T) {
	cluster := csiOnlyCluster()
	scheme := testScheme()
	cl := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: cl, Scheme: scheme}
	ctx := context.Background()
	req := ctrl.Request{
		NamespacedName: types.NamespacedName{Name: "test-block", Namespace: "default"},
	}
	// 20 back-to-back reconciles must all succeed without error.
	for i := 0; i < 20; i++ {
		if _, err := r.Reconcile(ctx, req); err != nil {
			t.Fatalf("reconcile #%d failed: %v", i, err)
		}
	}
}
// =============================================================================
// QA-13: Cleanup with missing resources doesn't error.
//
// If resources were already manually deleted before the finalizer runs,
// cleanup should succeed (not fail on NotFound).
// =============================================================================
func TestQA_Cleanup_MissingResources_NoError(t *testing.T) {
	cr := csiOnlyCluster()
	scheme := testScheme()
	// Only the CR itself exists — every owned resource is already gone,
	// simulating manual pre-deletion before the finalizer runs.
	cl := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cr).
		WithStatusSubresource(cr).
		Build()
	r := &Reconciler{Client: cl, Scheme: scheme}
	err := r.cleanupOwnedResources(context.Background(), cr)
	if err != nil {
		t.Errorf("BUG: cleanup with missing resources should succeed, got: %v", err)
	}
}
// =============================================================================
// QA-14: Volume storage size "0" — valid Quantity but nonsensical.
// =============================================================================
func TestQA_StorageSize_Zero(t *testing.T) {
	replicas := int32(1)
	spec := &blockv1alpha1.SeaweedBlockClusterSpec{
		Master: &blockv1alpha1.MasterSpec{
			Replicas: &replicas,
			Storage: &blockv1alpha1.StorageSpec{Size: "0"},
		},
	}
	// "0" is a valid resource.Quantity but creates a 0-byte PVC.
	// validate() should reject it or the reconciler should handle it.
	// (If validate() rejects it — the BUG-QA-7 fix — this test passes
	// without exercising the builder branch below.)
	err := validate(spec)
	if err == nil {
		// If validation passes, verify the builder handles it: the generated
		// PVC template must not request zero bytes.
		applyDefaults(spec)
		sts := resources.BuildMasterStatefulSet(&blockv1alpha1.SeaweedBlockCluster{
			ObjectMeta: metav1.ObjectMeta{Name: "zero-pvc", Namespace: "default"},
			Spec: *spec,
		})
		if len(sts.Spec.VolumeClaimTemplates) > 0 {
			size := sts.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage]
			if size.IsZero() {
				t.Error("BUG: 0-byte PVC request will be rejected by most storage provisioners. " +
					"Consider validating storage size > 0.")
			}
		}
	}
}
// =============================================================================
// QA-15: CSI-only mode with csiNamespace same as CR namespace.
//
// When csiNamespace == CR namespace, cross-namespace logic is used but the
// resources are actually same-namespace. Verify this works correctly.
// =============================================================================
func TestQA_CSINamespace_SameAsCRNamespace(t *testing.T) {
	cluster := csiOnlyCluster()
	cluster.Spec.CSINamespace = "default" // same as CR namespace
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "test-block", "default") // finalizer
	reconcile(t, r, "test-block", "default") // resources
	ctx := context.Background()
	// CSI controller should be in "default" namespace (the configured csiNS)
	var dep appsv1.Deployment
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block-csi-controller", Namespace: "default"}, &dep); err != nil {
		t.Fatalf("CSI controller should be in 'default' namespace: %v", err)
	}
	// Cleanup should still work (even though it's same-namespace)
	var latest blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block", Namespace: "default"}, &latest); err != nil {
		t.Fatal(err)
	}
	if err := r.cleanupOwnedResources(ctx, &latest); err != nil {
		t.Errorf("cleanup with same-namespace CSI should work: %v", err)
	}
	// CSI controller should be deleted — label-based ownership still applies
	// even when the "cross-namespace" path happens to be same-namespace.
	err := c.Get(ctx, types.NamespacedName{Name: "test-block-csi-controller", Namespace: "default"}, &dep)
	if !apierrors.IsNotFound(err) {
		t.Error("CSI controller should be deleted during cleanup")
	}
}
// =============================================================================
// QA-16: Ownership label tampering on cluster-scoped resources.
//
// If someone manually changes owner labels on a cluster-scoped resource to
// point to a different CR, the real owner should detect this as a conflict.
// =============================================================================
func TestQA_OwnershipLabel_Tampering(t *testing.T) {
	cluster := csiOnlyCluster()
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "test-block", "default") // finalizer
	reconcile(t, r, "test-block", "default") // create
	ctx := context.Background()
	// Tamper with CSIDriver labels — change owner to fake CR
	var csiDriver storagev1.CSIDriver
	if err := c.Get(ctx, types.NamespacedName{Name: blockv1alpha1.CSIDriverName}, &csiDriver); err != nil {
		t.Fatal(err)
	}
	// Guard against a nil label map: assigning into a nil map panics, which
	// would crash the whole test binary instead of failing this test cleanly
	// if the reconciler ever stopped labeling the CSIDriver.
	if csiDriver.Labels == nil {
		t.Fatal("CSIDriver has no labels; expected ownership labels from the reconciler")
	}
	csiDriver.Labels[blockv1alpha1.LabelOwnerName] = "evil-block"
	csiDriver.Labels[blockv1alpha1.LabelOwnerNamespace] = "evil-ns"
	if err := c.Update(ctx, &csiDriver); err != nil {
		t.Fatal(err)
	}
	// Next reconcile should detect conflict
	reconcile(t, r, "test-block", "default")
	var updated blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block", Namespace: "default"}, &updated); err != nil {
		t.Fatal(err)
	}
	if updated.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Errorf("BUG: label tampering should cause conflict detection, phase = %q, want Failed",
			updated.Status.Phase)
	}
}
// =============================================================================
// QA-17: Cleanup doesn't delete resources owned by a different CR.
//
// If two CRs exist and CR-A is deleted, cleanup should NOT delete resources
// owned by CR-B.
// =============================================================================
func TestQA_Cleanup_DoesNotDeleteOtherCRResources(t *testing.T) {
	ctx := context.Background()
	scheme := testScheme()
	// CR-B owns a CSI controller deployment in kube-system.
	ownerB := csiOnlyCluster()
	ownerB.Name = "block-b"
	ownerB.UID = "uid-b"
	depOwnedByB := resources.BuildCSIControllerDeployment(ownerB, "master:9333", "kube-system")
	// CR-A is the one being cleaned up.
	ownerA := csiOnlyCluster()
	ownerA.Name = "block-a"
	ownerA.UID = "uid-a"
	cl := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(ownerA, depOwnedByB).
		WithStatusSubresource(ownerA).
		Build()
	r := &Reconciler{Client: cl, Scheme: scheme}
	// Cleanup for CR-A must not touch CR-B's deployment.
	if err := r.cleanupOwnedResources(ctx, ownerA); err != nil {
		t.Fatal(err)
	}
	var dep appsv1.Deployment
	if err := cl.Get(ctx, types.NamespacedName{Name: "block-b-csi-controller", Namespace: "kube-system"}, &dep); err != nil {
		t.Error("BUG: cleanup for CR-A should not delete CR-B's resources")
	}
}
// =============================================================================
// QA-18: Secret rotation timestamp at exact same second — no rotation.
//
// If rotateSecret and lastRotated are the same timestamp, NeedsRotation
// should return false (rotation already happened).
// =============================================================================
func TestQA_RotationTimestamp_ExactSame_NoRotation(t *testing.T) {
	const ts = "2025-06-15T12:00:00Z"
	cluster := csiOnlyCluster()
	cluster.Annotations = map[string]string{
		blockv1alpha1.AnnotationRotateSecret: ts,
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{
				blockv1alpha1.AnnotationLastRotated: ts,
			},
		},
	}
	// Identical timestamps mean the requested rotation already happened.
	if got := resources.NeedsRotation(cluster, secret); got {
		t.Error("BUG: same timestamp should not trigger rotation (already done)")
	}
}

657
weed/storage/blockvol/operator/internal/controller/reconciler.go

@ -0,0 +1,657 @@
package controller
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/internal/resources"
)
// Reconciler reconciles a SeaweedBlockCluster object. It manages the full set
// of sub-resources (StatefulSets, Services, CSI deployment/daemonset, RBAC,
// CSIDriver, StorageClass, CHAP secret) and cleans them up via a finalizer.
type Reconciler struct {
	client.Client // Kubernetes API client, embedded for direct Get/Create/Update/Delete
	Scheme *runtime.Scheme // scheme used by the manager for GVK resolution of managed objects
}
// +kubebuilder:rbac:groups=block.seaweedfs.com,resources=seaweedblockclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=block.seaweedfs.com,resources=seaweedblockclusters/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=block.seaweedfs.com,resources=seaweedblockclusters/finalizers,verbs=update
// +kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=services;serviceaccounts;secrets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles;clusterrolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=storage.k8s.io,resources=csidrivers;storageclasses,verbs=get;list;watch;create;update;patch;delete
// Reconcile handles a single reconciliation loop for a SeaweedBlockCluster.
//
// Flow: fetch CR → finalizer handling (cleanup on deletion, otherwise ensure
// the finalizer) → defaulting → validation → mode-specific resources
// (full-stack deploys master/volume StatefulSets; CSI-only uses
// spec.masterRef) → shared CSI resources (CHAP secret, RBAC, CSIDriver,
// controller, node, StorageClass) → status update.
//
// Conflicts on cluster-scoped resources set phase=Failed via setFailedStatus
// instead of returning an error, since retrying cannot resolve them.
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)
	// 1. Fetch CR
	var cluster blockv1alpha1.SeaweedBlockCluster
	if err := r.Get(ctx, req.NamespacedName, &cluster); err != nil {
		if apierrors.IsNotFound(err) {
			// CR already deleted (finalizer gone) — nothing to do.
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}
	// 2. Handle finalizer for resource cleanup (cluster-scoped + cross-namespace)
	if !cluster.DeletionTimestamp.IsZero() {
		if controllerutil.ContainsFinalizer(&cluster, blockv1alpha1.FinalizerName) {
			if err := r.cleanupOwnedResources(ctx, &cluster); err != nil {
				// Returning the error keeps the finalizer in place so cleanup retries.
				logger.Error(err, "failed to cleanup owned resources")
				return ctrl.Result{}, err
			}
			controllerutil.RemoveFinalizer(&cluster, blockv1alpha1.FinalizerName)
			if err := r.Update(ctx, &cluster); err != nil {
				return ctrl.Result{}, err
			}
		}
		return ctrl.Result{}, nil
	}
	// Ensure finalizer — added before any resource creation so an interrupted
	// reconcile still gets cleaned up when the CR is deleted.
	if !controllerutil.ContainsFinalizer(&cluster, blockv1alpha1.FinalizerName) {
		controllerutil.AddFinalizer(&cluster, blockv1alpha1.FinalizerName)
		if err := r.Update(ctx, &cluster); err != nil {
			return ctrl.Result{}, err
		}
	}
	// 3. Apply defaults (mutates only the in-memory copy; the spec is never
	// written back to the API server)
	applyDefaults(&cluster.Spec)
	// 4. Validate
	if err := validateName(cluster.Name); err != nil {
		return r.setFailedStatus(ctx, &cluster, blockv1alpha1.ConditionValidationFailed, "ValidationFailed", err.Error())
	}
	if err := validate(&cluster.Spec); err != nil {
		return r.setFailedStatus(ctx, &cluster, blockv1alpha1.ConditionValidationFailed, "ValidationFailed", err.Error())
	}
	// 5. Determine mode and master address
	var masterAddr string
	if cluster.Spec.Master != nil {
		// Full-stack mode — use FQDN for robustness (L1 fix)
		masterAddr = fmt.Sprintf("%s-master.%s.svc.cluster.local:%d",
			cluster.Name, cluster.Namespace, cluster.Spec.Master.Port)
		// 6a. Master StatefulSet + Service
		if err := r.reconcileMasterService(ctx, &cluster); err != nil {
			return ctrl.Result{}, err
		}
		if err := r.reconcileMasterStatefulSet(ctx, &cluster); err != nil {
			return ctrl.Result{}, err
		}
		// 6b. Volume StatefulSet + Service (auto-create VolumeSpec if nil).
		// Re-run defaulting so the freshly created VolumeSpec gets defaults too.
		if cluster.Spec.Volume == nil {
			cluster.Spec.Volume = &blockv1alpha1.VolumeSpec{}
			applyDefaults(&cluster.Spec)
		}
		if err := r.reconcileVolumeService(ctx, &cluster); err != nil {
			return ctrl.Result{}, err
		}
		if err := r.reconcileVolumeStatefulSet(ctx, &cluster, masterAddr); err != nil {
			return ctrl.Result{}, err
		}
	} else {
		// CSI-only mode — connect to an externally managed master
		masterAddr = cluster.Spec.MasterRef.Address
	}
	csiNS := cluster.Spec.CSINamespace
	// 8. CHAP Secret
	if err := r.reconcileCHAPSecret(ctx, &cluster, csiNS); err != nil {
		return ctrl.Result{}, err
	}
	// 9-11. RBAC
	if err := r.reconcileServiceAccount(ctx, &cluster, csiNS); err != nil {
		return ctrl.Result{}, err
	}
	if conflict, err := r.reconcileClusterRole(ctx, &cluster); err != nil {
		return ctrl.Result{}, err
	} else if conflict != "" {
		return r.setFailedStatus(ctx, &cluster, blockv1alpha1.ConditionResourceConflict, "ResourceConflict", conflict)
	}
	if conflict, err := r.reconcileClusterRoleBinding(ctx, &cluster, csiNS); err != nil {
		return ctrl.Result{}, err
	} else if conflict != "" {
		return r.setFailedStatus(ctx, &cluster, blockv1alpha1.ConditionResourceConflict, "ResourceConflict", conflict)
	}
	// 12. CSIDriver
	if conflict, err := r.reconcileCSIDriver(ctx, &cluster); err != nil {
		return ctrl.Result{}, err
	} else if conflict != "" {
		return r.setFailedStatus(ctx, &cluster, blockv1alpha1.ConditionResourceConflict, "ResourceConflict", conflict)
	}
	// 13. CSI Controller Deployment
	if err := r.reconcileCSIController(ctx, &cluster, masterAddr, csiNS); err != nil {
		return ctrl.Result{}, err
	}
	// 14. CSI Node DaemonSet
	if err := r.reconcileCSINode(ctx, &cluster, csiNS); err != nil {
		return ctrl.Result{}, err
	}
	// 15. StorageClass
	if conflict, err := r.reconcileStorageClass(ctx, &cluster); err != nil {
		return ctrl.Result{}, err
	} else if conflict != "" {
		return r.setFailedStatus(ctx, &cluster, blockv1alpha1.ConditionResourceConflict, "ResourceConflict", conflict)
	}
	// 16-17. Compute and update status
	return r.updateStatus(ctx, &cluster, masterAddr)
}
// SetupWithManager registers the controller with the manager.
//
// Owns() only works same-namespace. CSI resources in csiNamespace use
// label-based ownership + finalizer cleanup instead. We still watch
// same-namespace resources (master/volume StatefulSets and Services).
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	builder := ctrl.NewControllerManagedBy(mgr).
		For(&blockv1alpha1.SeaweedBlockCluster{})
	builder = builder.Owns(&appsv1.StatefulSet{})
	builder = builder.Owns(&corev1.Service{})
	return builder.Complete(r)
}
// --- Namespace-scoped reconcilers (cross-namespace CSI resources) ---

// reconcileServiceAccount ensures the CSI driver's ServiceAccount exists in csiNS.
func (r *Reconciler) reconcileServiceAccount(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, csiNS string) error {
	desired := resources.BuildServiceAccount(cluster, csiNS)
	return r.createOrUpdateCrossNamespace(ctx, cluster, desired, &corev1.ServiceAccount{})
}

// reconcileCSIController ensures the CSI controller Deployment exists in csiNS,
// wired to masterAddr.
func (r *Reconciler) reconcileCSIController(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, masterAddr, csiNS string) error {
	desired := resources.BuildCSIControllerDeployment(cluster, masterAddr, csiNS)
	return r.createOrUpdateCrossNamespace(ctx, cluster, desired, &appsv1.Deployment{})
}

// reconcileCSINode ensures the per-node CSI DaemonSet exists in csiNS.
func (r *Reconciler) reconcileCSINode(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, csiNS string) error {
	desired := resources.BuildCSINodeDaemonSet(cluster, csiNS)
	return r.createOrUpdateCrossNamespace(ctx, cluster, desired, &appsv1.DaemonSet{})
}
// reconcileCHAPSecret ensures the operator-managed CHAP credentials Secret
// exists in csiNS and rotates its password when rotation is requested.
// A user-supplied secret (spec.auth.secretRef) is never touched.
func (r *Reconciler) reconcileCHAPSecret(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, csiNS string) error {
	// User brings their own secret — nothing to manage.
	if auth := cluster.Spec.Auth; auth != nil && auth.SecretRef != nil {
		return nil
	}
	name := resources.CHAPSecretName(cluster)
	var existing corev1.Secret
	switch err := r.Get(ctx, types.NamespacedName{Name: name, Namespace: csiNS}, &existing); {
	case apierrors.IsNotFound(err):
		// First reconcile: generate a fresh secret.
		secret, genErr := resources.BuildCHAPSecret(cluster, csiNS)
		if genErr != nil {
			return genErr
		}
		return r.Create(ctx, secret)
	case err != nil:
		return err
	}
	// Secret already exists — rotate the password in place only when asked.
	if !resources.NeedsRotation(cluster, &existing) {
		return nil
	}
	if err := resources.RegenerateCHAPPassword(&existing); err != nil {
		return err
	}
	return r.Update(ctx, &existing)
}
// --- Namespace-scoped reconcilers (same-namespace, use ownerRef) ---

// reconcileMasterService ensures the master Service exists (full-stack mode).
func (r *Reconciler) reconcileMasterService(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster) error {
	desired := resources.BuildMasterService(cluster)
	return r.createOrUpdateSameNamespace(ctx, desired, &corev1.Service{})
}

// reconcileMasterStatefulSet ensures the master StatefulSet exists (full-stack mode).
func (r *Reconciler) reconcileMasterStatefulSet(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster) error {
	desired := resources.BuildMasterStatefulSet(cluster)
	return r.createOrUpdateSameNamespace(ctx, desired, &appsv1.StatefulSet{})
}

// reconcileVolumeService ensures the volume-server Service exists (full-stack mode).
func (r *Reconciler) reconcileVolumeService(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster) error {
	desired := resources.BuildVolumeService(cluster)
	return r.createOrUpdateSameNamespace(ctx, desired, &corev1.Service{})
}

// reconcileVolumeStatefulSet ensures the volume-server StatefulSet exists,
// pointed at masterAddr (full-stack mode).
func (r *Reconciler) reconcileVolumeStatefulSet(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, masterAddr string) error {
	desired := resources.BuildVolumeStatefulSet(cluster, masterAddr)
	return r.createOrUpdateSameNamespace(ctx, desired, &appsv1.StatefulSet{})
}
// --- Cluster-scoped reconcilers ---
// Each returns (conflict, err); a non-empty conflict string means the named
// resource exists but is owned by another CR (or is unmanaged).

// reconcileClusterRole ensures the CSI driver's ClusterRole exists.
func (r *Reconciler) reconcileClusterRole(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster) (conflict string, err error) {
	desired := resources.BuildClusterRole(cluster)
	return r.createOrUpdateClusterScoped(ctx, cluster, desired, &rbacv1.ClusterRole{})
}

// reconcileClusterRoleBinding ensures the binding to the CSI ServiceAccount in csiNS.
func (r *Reconciler) reconcileClusterRoleBinding(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, csiNS string) (conflict string, err error) {
	desired := resources.BuildClusterRoleBinding(cluster, csiNS)
	return r.createOrUpdateClusterScoped(ctx, cluster, desired, &rbacv1.ClusterRoleBinding{})
}

// reconcileCSIDriver ensures the cluster-wide CSIDriver object exists.
func (r *Reconciler) reconcileCSIDriver(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster) (conflict string, err error) {
	desired := resources.BuildCSIDriverResource(cluster)
	return r.createOrUpdateClusterScoped(ctx, cluster, desired, &storagev1.CSIDriver{})
}
// reconcileStorageClass ensures the StorageClass exists and is owned by this
// CR. Returns (conflict, err): a non-empty conflict means another CR owns it,
// or an unmanaged StorageClass holds the name and adoption was not opted into.
func (r *Reconciler) reconcileStorageClass(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster) (conflict string, err error) {
	desired := resources.BuildStorageClass(cluster)
	var existing storagev1.StorageClass
	err = r.Get(ctx, types.NamespacedName{Name: desired.Name}, &existing)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return "", r.Create(ctx, desired)
		}
		return "", err
	}
	ownership := resources.CheckOwnership(&existing, cluster)
	switch ownership {
	case resources.OwnershipOwned:
		// Already ours — refresh only the labels. NOTE(review): presumably
		// because StorageClass provisioner/parameters are immutable after
		// creation, a full spec update is not attempted; confirm intent.
		existing.Labels = desired.Labels
		return "", r.Update(ctx, &existing)
	case resources.OwnershipConflict:
		return fmt.Sprintf("StorageClass %s already owned by %s", desired.Name, resources.ConflictOwner(&existing)), nil
	case resources.OwnershipOrphan:
		// Pre-existing unmanaged StorageClass: adopt (stamp ownership labels)
		// only on explicit opt-in via spec.adoptExistingStorageClass.
		if cluster.Spec.AdoptExistingStorageClass {
			existing.Labels = desired.Labels
			return "", r.Update(ctx, &existing)
		}
		return fmt.Sprintf("StorageClass %s exists and is not managed by this operator. Set adoptExistingStorageClass: true to adopt.", desired.Name), nil
	}
	return "", nil
}
// --- Generic helpers ---
// createOrUpdateSameNamespace creates or updates a resource living in the same
// namespace as the CR. Garbage collection relies on the ownerReference (set by
// the resource builders).
func (r *Reconciler) createOrUpdateSameNamespace(ctx context.Context, desired client.Object, existing client.Object) error {
	err := r.Get(ctx, client.ObjectKeyFromObject(desired), existing)
	if apierrors.IsNotFound(err) {
		return r.Create(ctx, desired)
	}
	if err != nil {
		return err
	}
	// Carry over the server-assigned identity so Update targets the live object.
	desired.SetResourceVersion(existing.GetResourceVersion())
	desired.SetUID(existing.GetUID())
	return r.Update(ctx, desired)
}
// createOrUpdateCrossNamespace creates or updates a resource living in a
// different namespace than the CR. Ownership is tracked with labels rather
// than an ownerRef (ownerRefs cannot cross namespaces); deletion is handled
// by the CR's finalizer.
func (r *Reconciler) createOrUpdateCrossNamespace(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, desired client.Object, existing client.Object) error {
	err := r.Get(ctx, client.ObjectKeyFromObject(desired), existing)
	if apierrors.IsNotFound(err) {
		return r.Create(ctx, desired)
	}
	if err != nil {
		return err
	}
	// Re-stamp ownership labels on every update so a manual label edit cannot
	// permanently orphan the resource, then target the live object.
	resources.SetCrossNamespaceOwnership(cluster, desired)
	desired.SetResourceVersion(existing.GetResourceVersion())
	desired.SetUID(existing.GetUID())
	return r.Update(ctx, desired)
}
// createOrUpdateClusterScoped creates or updates a cluster-scoped resource with ownership labels.
// Returns (conflict message, error). Non-empty conflict means another CR owns this resource.
func (r *Reconciler) createOrUpdateClusterScoped(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, desired client.Object, existing client.Object) (string, error) {
	key := types.NamespacedName{Name: desired.GetName()}
	err := r.Get(ctx, key, existing)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return "", r.Create(ctx, desired)
		}
		return "", err
	}
	switch resources.CheckOwnership(existing, cluster) {
	case resources.OwnershipOwned:
		// Ours — update in place, carrying over the server-assigned identity.
		desired.SetResourceVersion(existing.GetResourceVersion())
		desired.SetUID(existing.GetUID())
		return "", r.Update(ctx, desired)
	case resources.OwnershipConflict:
		return fmt.Sprintf("%s %s already owned by %s", kindOf(existing), desired.GetName(), resources.ConflictOwner(existing)), nil
	case resources.OwnershipOrphan:
		// M4 fix: require explicit opt-in for orphan adoption of security-sensitive resources.
		// For CSIDriver/ClusterRole/CRB, fail rather than silently adopting.
		return fmt.Sprintf("%s %s exists but is not managed by this operator", kindOf(existing), desired.GetName()), nil
	}
	return "", nil
}

// kindOf returns the object's Kind for human-readable conflict messages,
// falling back to the Go type name when the GVK is unset (typical for objects
// fetched through a typed client). Extracted to remove the duplicated
// derivation that previously appeared in both conflict branches.
func kindOf(obj client.Object) string {
	if kind := obj.GetObjectKind().GroupVersionKind().Kind; kind != "" {
		return kind
	}
	return fmt.Sprintf("%T", obj)
}
// cleanupOwnedResources deletes all resources owned by this CR:
// cluster-scoped (CSIDriver, ClusterRole, CRB, StorageClass) AND
// cross-namespace (CSI controller, node, SA, secret in csiNamespace).
//
// Transient Get/Delete errors are returned so the caller keeps the finalizer
// and retries. (Previously any non-nil Get error — including API timeouts —
// was silently treated as "resource absent", which could remove the finalizer
// while leaking cluster-scoped and cross-namespace resources.)
func (r *Reconciler) cleanupOwnedResources(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster) error {
	// StorageClass name falls back to the operator default when unset.
	scName := cluster.Spec.StorageClassName
	if scName == "" {
		scName = blockv1alpha1.DefaultStorageClassName
	}
	// Cross-namespace resources live in csiNamespace (H1 fix).
	csiNS := cluster.Spec.CSINamespace
	if csiNS == "" {
		csiNS = blockv1alpha1.DefaultCSINamespace
	}
	// One table entry per owned resource; cluster-scoped entries have no namespace.
	targets := []struct {
		kind string
		key  types.NamespacedName
		obj  client.Object
	}{
		{"CSIDriver", types.NamespacedName{Name: blockv1alpha1.CSIDriverName}, &storagev1.CSIDriver{}},
		{"ClusterRole", types.NamespacedName{Name: resources.ClusterRoleName()}, &rbacv1.ClusterRole{}},
		{"ClusterRoleBinding", types.NamespacedName{Name: resources.ClusterRoleBindingName()}, &rbacv1.ClusterRoleBinding{}},
		{"StorageClass", types.NamespacedName{Name: scName}, &storagev1.StorageClass{}},
		{"CSI controller Deployment", types.NamespacedName{Name: resources.CSIControllerName(cluster), Namespace: csiNS}, &appsv1.Deployment{}},
		{"CSI node DaemonSet", types.NamespacedName{Name: resources.CSINodeName(cluster), Namespace: csiNS}, &appsv1.DaemonSet{}},
		{"ServiceAccount", types.NamespacedName{Name: resources.ServiceAccountName(), Namespace: csiNS}, &corev1.ServiceAccount{}},
		{"CHAP Secret", types.NamespacedName{Name: resources.CHAPSecretName(cluster), Namespace: csiNS}, &corev1.Secret{}},
	}
	for _, tgt := range targets {
		if err := r.deleteIfOwned(ctx, cluster, tgt.kind, tgt.key, tgt.obj); err != nil {
			return err
		}
	}
	return nil
}

// deleteIfOwned looks up the object at key and deletes it when its ownership
// labels point at cluster. NotFound on Get or Delete counts as success
// (already gone); resources owned by another CR — or unmanaged — are left
// untouched; any other error is returned so cleanup can be retried.
func (r *Reconciler) deleteIfOwned(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, kind string, key types.NamespacedName, obj client.Object) error {
	logger := log.FromContext(ctx)
	if err := r.Get(ctx, key, obj); err != nil {
		if apierrors.IsNotFound(err) {
			return nil
		}
		return err
	}
	if !resources.IsOwnedBy(obj, cluster) {
		return nil
	}
	logger.Info("deleting "+kind, "name", key.Name, "namespace", key.Namespace)
	if err := r.Delete(ctx, obj); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}
// --- Status helpers ---
// setFailedStatus marks the CR as Failed and records the failure as a
// status condition of the given type, then persists the status
// subresource. The returned Result never requeues: a spec-level failure
// only resolves when the user edits the CR, which triggers a new event.
func (r *Reconciler) setFailedStatus(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, condType, reason, message string) (ctrl.Result, error) {
	cluster.Status.Phase = blockv1alpha1.PhaseFailed
	failedCond := metav1.Condition{
		Type:               condType,
		Status:             metav1.ConditionTrue,
		Reason:             reason,
		Message:            message,
		LastTransitionTime: metav1.Now(),
	}
	setCondition(&cluster.Status, failedCond)
	err := r.Status().Update(ctx, cluster)
	return ctrl.Result{}, err
}
// updateStatus recomputes the observed state of every managed component,
// refreshes the readiness conditions, and persists the status
// subresource. The phase starts as Running and is demoted to Pending
// whenever a managed StatefulSet is not ready yet.
func (r *Reconciler) updateStatus(ctx context.Context, cluster *blockv1alpha1.SeaweedBlockCluster, masterAddr string) (ctrl.Result, error) {
	cluster.Status.MasterAddress = masterAddr
	cluster.Status.Phase = blockv1alpha1.PhaseRunning

	// Drop failure conditions left over from earlier reconcile attempts.
	removeCondition(&cluster.Status, blockv1alpha1.ConditionResourceConflict)
	removeCondition(&cluster.Status, blockv1alpha1.ConditionValidationFailed)

	// record upserts one condition with a fresh transition timestamp.
	record := func(condType string, ok bool, reason, message string) {
		setCondition(&cluster.Status, metav1.Condition{
			Type:               condType,
			Status:             boolToStatus(ok),
			Reason:             reason,
			Message:            message,
			LastTransitionTime: metav1.Now(),
		})
	}

	// MasterReady: a managed master needs a ready StatefulSet; an external
	// master (CSI-only mode) is reported ready by definition.
	if cluster.Spec.Master != nil {
		masterOK := r.checkStatefulSetReady(ctx, cluster.Name+"-master", cluster.Namespace)
		record(blockv1alpha1.ConditionMasterReady, masterOK, readyReason(masterOK), "")
		if !masterOK {
			cluster.Status.Phase = blockv1alpha1.PhasePending
		}
	} else {
		record(blockv1alpha1.ConditionMasterReady, true, "ExternalMaster", "Using external master: "+masterAddr)
	}

	// VolumeReady: only checked when both volume and master are managed.
	if cluster.Spec.Volume != nil && cluster.Spec.Master != nil {
		volOK := r.checkStatefulSetReady(ctx, cluster.Name+"-volume", cluster.Namespace)
		record(blockv1alpha1.ConditionVolumeReady, volOK, readyReason(volOK), "")
		if !volOK {
			cluster.Status.Phase = blockv1alpha1.PhasePending
		}
	} else {
		record(blockv1alpha1.ConditionVolumeReady, true, "ExternalVolume", "")
	}

	// CSIReady: controller Deployment AND node DaemonSet must both be up.
	csiNS := cluster.Spec.CSINamespace
	if csiNS == "" {
		csiNS = blockv1alpha1.DefaultCSINamespace
	}
	controllerOK := r.checkDeploymentReady(ctx, resources.CSIControllerName(cluster), csiNS)
	nodeOK := r.checkDaemonSetReady(ctx, resources.CSINodeName(cluster), csiNS)
	csiOK := controllerOK && nodeOK
	record(blockv1alpha1.ConditionCSIReady, csiOK, readyReason(csiOK), "")

	// AuthConfigured: either the user supplied a secret ref, or the
	// auto-generated CHAP secret exists in the CSI namespace.
	authOK := cluster.Spec.Auth != nil && cluster.Spec.Auth.SecretRef != nil
	if !authOK {
		var chap corev1.Secret
		key := types.NamespacedName{
			Name:      resources.CHAPSecretName(cluster),
			Namespace: csiNS,
		}
		if r.Get(ctx, key, &chap) == nil {
			authOK = true
		}
	}
	record(blockv1alpha1.ConditionAuthConfigured, authOK, readyReason(authOK), "")

	if err := r.Status().Update(ctx, cluster); err != nil {
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
// checkStatefulSetReady reports whether the named StatefulSet exists and
// has at least one ready replica. A lookup failure counts as not ready.
func (r *Reconciler) checkStatefulSetReady(ctx context.Context, name, namespace string) bool {
	sts := appsv1.StatefulSet{}
	err := r.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, &sts)
	return err == nil && sts.Status.ReadyReplicas >= 1
}
// checkDeploymentReady reports whether the named Deployment exists and
// has at least one ready replica. A lookup failure counts as not ready.
func (r *Reconciler) checkDeploymentReady(ctx context.Context, name, namespace string) bool {
	dep := appsv1.Deployment{}
	err := r.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, &dep)
	return err == nil && dep.Status.ReadyReplicas >= 1
}
// checkDaemonSetReady reports whether the named DaemonSet exists and has
// at least one pod ready. A lookup failure counts as not ready.
func (r *Reconciler) checkDaemonSetReady(ctx context.Context, name, namespace string) bool {
	ds := appsv1.DaemonSet{}
	err := r.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, &ds)
	return err == nil && ds.Status.NumberReady >= 1
}
// --- Condition helpers ---
// setCondition upserts cond into the status condition list: an existing
// condition of the same type is replaced in place, otherwise the new
// condition is appended.
func setCondition(status *blockv1alpha1.SeaweedBlockClusterStatus, cond metav1.Condition) {
	for i := range status.Conditions {
		if status.Conditions[i].Type == cond.Type {
			status.Conditions[i] = cond
			return
		}
	}
	status.Conditions = append(status.Conditions, cond)
}
// removeCondition drops every condition of the given type, compacting the
// slice in place (the backing array is reused; no allocation).
func removeCondition(status *blockv1alpha1.SeaweedBlockClusterStatus, condType string) {
	n := 0
	for _, c := range status.Conditions {
		if c.Type != condType {
			status.Conditions[n] = c
			n++
		}
	}
	status.Conditions = status.Conditions[:n]
}
// boolToStatus maps a readiness bool to the metav1 condition status enum.
func boolToStatus(b bool) metav1.ConditionStatus {
	status := metav1.ConditionFalse
	if b {
		status = metav1.ConditionTrue
	}
	return status
}
// readyReason maps a readiness bool to the condition Reason string.
func readyReason(ready bool) string {
	reason := "NotReady"
	if ready {
		reason = "Ready"
	}
	return reason
}

970
weed/storage/blockvol/operator/internal/controller/reconciler_test.go

@ -0,0 +1,970 @@
package controller
import (
	"context"
	"strings"
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
	"github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/internal/resources"
)
// testScheme builds a runtime scheme containing the built-in client-go
// types plus our SeaweedBlockCluster API group, for use with the fake client.
func testScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	for _, add := range []func(*runtime.Scheme) error{
		clientgoscheme.AddToScheme,
		blockv1alpha1.AddToScheme,
	} {
		_ = add(scheme) // registration of known types cannot fail here
	}
	return scheme
}
// csiOnlyCluster returns a fixture CR in CSI-only mode: an external master
// referenced by address, with no managed master/volume StatefulSets.
func csiOnlyCluster() *blockv1alpha1.SeaweedBlockCluster {
	cluster := &blockv1alpha1.SeaweedBlockCluster{}
	cluster.Name = "test-block"
	cluster.Namespace = "default"
	cluster.UID = "uid-123"
	cluster.Spec = blockv1alpha1.SeaweedBlockClusterSpec{
		CSIImage:         "sw-block-csi:local",
		MasterRef:        &blockv1alpha1.MasterRef{Address: "master.default:9333"},
		StorageClassName: "sw-block",
		CSINamespace:     "kube-system",
	}
	return cluster
}
// fullStackCluster returns a fixture CR in full-stack mode: a single-replica
// managed master with persistent storage, plus the CSI components.
func fullStackCluster() *blockv1alpha1.SeaweedBlockCluster {
	one := int32(1)
	cluster := &blockv1alpha1.SeaweedBlockCluster{}
	cluster.Name = "test-full"
	cluster.Namespace = "default"
	cluster.UID = "uid-456"
	cluster.Spec = blockv1alpha1.SeaweedBlockClusterSpec{
		Image:    "chrislusf/seaweedfs:latest",
		CSIImage: "sw-block-csi:local",
		Master: &blockv1alpha1.MasterSpec{
			Replicas: &one,
			Port:     9333,
			GRPCPort: 19333,
			Storage:  &blockv1alpha1.StorageSpec{Size: "5Gi"},
		},
		StorageClassName: "sw-block-full",
		CSINamespace:     "kube-system",
	}
	return cluster
}
// reconcile runs one reconcile pass for the named CR and fails the test on
// any reconcile error.
func reconcile(t *testing.T, r *Reconciler, name, ns string) ctrl.Result {
	t.Helper()
	req := ctrl.Request{
		NamespacedName: types.NamespacedName{Name: name, Namespace: ns},
	}
	res, err := r.Reconcile(context.Background(), req)
	if err != nil {
		t.Fatalf("Reconcile error: %v", err)
	}
	return res
}
// ensureNamespace appends a kube-system Namespace object to the fixture
// list unless one is already present.
func ensureNamespace(objects []runtime.Object) []runtime.Object {
	for _, obj := range objects {
		ns, isNS := obj.(*corev1.Namespace)
		if isNS && ns.Name == "kube-system" {
			return objects
		}
	}
	kubeSystem := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: "kube-system"},
	}
	return append(objects, kubeSystem)
}
// --- Test: CSI-only mode creates all CSI sub-resources ---
// Verifies that a MasterRef-only CR gets a finalizer plus the full CSI
// stack (Deployment, DaemonSet, RBAC, CSIDriver, StorageClass, CHAP
// secret), and that no master/volume StatefulSets are created.
func TestReconcile_CSIOnly_CreatesResources(t *testing.T) {
	cluster := csiOnlyCluster()
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	// First reconcile: adds finalizer
	reconcile(t, r, "test-block", "default")
	// Second reconcile: creates resources (after finalizer is persisted)
	reconcile(t, r, "test-block", "default")
	ctx := context.Background()
	// Verify finalizer was added
	var updated blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block", Namespace: "default"}, &updated); err != nil {
		t.Fatalf("get cluster: %v", err)
	}
	hasFinalizer := false
	for _, f := range updated.Finalizers {
		if f == blockv1alpha1.FinalizerName {
			hasFinalizer = true
		}
	}
	if !hasFinalizer {
		t.Error("missing finalizer")
	}
	// CSI Controller Deployment
	var dep appsv1.Deployment
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block-csi-controller", Namespace: "kube-system"}, &dep); err != nil {
		t.Errorf("CSI controller not created: %v", err)
	}
	// CSI Node DaemonSet
	var ds appsv1.DaemonSet
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block-csi-node", Namespace: "kube-system"}, &ds); err != nil {
		t.Errorf("CSI node not created: %v", err)
	}
	// ServiceAccount
	var sa corev1.ServiceAccount
	if err := c.Get(ctx, types.NamespacedName{Name: "sw-block-csi", Namespace: "kube-system"}, &sa); err != nil {
		t.Errorf("ServiceAccount not created: %v", err)
	}
	// ClusterRole
	var cr rbacv1.ClusterRole
	if err := c.Get(ctx, types.NamespacedName{Name: "sw-block-csi"}, &cr); err != nil {
		t.Errorf("ClusterRole not created: %v", err)
	}
	// ClusterRoleBinding
	var crb rbacv1.ClusterRoleBinding
	if err := c.Get(ctx, types.NamespacedName{Name: "sw-block-csi"}, &crb); err != nil {
		t.Errorf("ClusterRoleBinding not created: %v", err)
	}
	// CSIDriver
	var csiDriver storagev1.CSIDriver
	if err := c.Get(ctx, types.NamespacedName{Name: blockv1alpha1.CSIDriverName}, &csiDriver); err != nil {
		t.Errorf("CSIDriver not created: %v", err)
	}
	// StorageClass
	var sc storagev1.StorageClass
	if err := c.Get(ctx, types.NamespacedName{Name: "sw-block"}, &sc); err != nil {
		t.Errorf("StorageClass not created: %v", err)
	}
	// CHAP Secret
	var secret corev1.Secret
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block-chap", Namespace: "kube-system"}, &secret); err != nil {
		t.Errorf("CHAP secret not created: %v", err)
	}
	// No master or volume StatefulSets in CSI-only mode
	var masterSts appsv1.StatefulSet
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block-master", Namespace: "default"}, &masterSts); !apierrors.IsNotFound(err) {
		t.Error("master StatefulSet should not exist in CSI-only mode")
	}
}
// --- Test: Invalid spec (both master+masterRef) → Failed ---
// master and masterRef are mutually exclusive; specifying both must
// drive the CR into the Failed phase.
func TestReconcile_BothMasterAndRef_Failed(t *testing.T) {
	replicas := int32(1)
	cluster := &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "bad-block",
			Namespace: "default",
			UID:       "uid-bad",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{
			Master:    &blockv1alpha1.MasterSpec{Replicas: &replicas},
			MasterRef: &blockv1alpha1.MasterRef{Address: "x:9333"},
		},
	}
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	// First reconcile: add finalizer
	reconcile(t, r, "bad-block", "default")
	// Second: validate
	reconcile(t, r, "bad-block", "default")
	var updated blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(context.Background(), types.NamespacedName{Name: "bad-block", Namespace: "default"}, &updated); err != nil {
		t.Fatal(err)
	}
	if updated.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Errorf("phase = %q, want Failed", updated.Status.Phase)
	}
}
// --- Test: Neither master nor masterRef → Failed ---
// An empty spec provides no way to reach a master; validation must fail.
func TestReconcile_NeitherMasterNorRef_Failed(t *testing.T) {
	cluster := &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "empty-block",
			Namespace: "default",
			UID:       "uid-empty",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{},
	}
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "empty-block", "default")
	reconcile(t, r, "empty-block", "default")
	var updated blockv1alpha1.SeaweedBlockCluster
	_ = c.Get(context.Background(), types.NamespacedName{Name: "empty-block", Namespace: "default"}, &updated)
	if updated.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Errorf("phase = %q, want Failed", updated.Status.Phase)
	}
}
// --- Test: Master replicas > 1 → Failed ---
// HA masters are deferred to a later phase; replicas > 1 must be rejected.
func TestReconcile_MasterReplicasGT1_Failed(t *testing.T) {
	replicas := int32(3)
	cluster := &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "ha-block", Namespace: "default", UID: "uid-ha",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{
			Master: &blockv1alpha1.MasterSpec{Replicas: &replicas},
		},
	}
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "ha-block", "default")
	reconcile(t, r, "ha-block", "default")
	var updated blockv1alpha1.SeaweedBlockCluster
	_ = c.Get(context.Background(), types.NamespacedName{Name: "ha-block", Namespace: "default"}, &updated)
	if updated.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Errorf("phase = %q, want Failed", updated.Status.Phase)
	}
}
// --- Test: Multi-CR safety — second CR with conflicting cluster-scoped resources ---
// When a cluster-scoped singleton (the CSIDriver) is already owned by a
// different SeaweedBlockCluster, a second CR must fail fast with a
// ResourceConflict condition instead of stealing or clobbering it.
func TestReconcile_MultiCR_ConflictDetected(t *testing.T) {
	// Pre-create cluster-scoped resources owned by another CR
	clusterA := csiOnlyCluster()
	clusterA.Name = "block-a"
	clusterA.UID = "uid-a"
	// Create CSIDriver owned by block-a
	csiDriver := resources.BuildCSIDriverResource(clusterA)
	clusterB := csiOnlyCluster()
	clusterB.Name = "block-b"
	clusterB.UID = "uid-b"
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(clusterB, csiDriver).
		WithStatusSubresource(clusterB).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "block-b", "default") // finalizer
	reconcile(t, r, "block-b", "default") // reconcile
	var updated blockv1alpha1.SeaweedBlockCluster
	_ = c.Get(context.Background(), types.NamespacedName{Name: "block-b", Namespace: "default"}, &updated)
	if updated.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Errorf("phase = %q, want Failed (conflict)", updated.Status.Phase)
	}
	// Should have ResourceConflict condition. Loop variable renamed from
	// `c` to `cond` — the original shadowed the fake client `c` above.
	found := false
	for _, cond := range updated.Status.Conditions {
		if cond.Type == blockv1alpha1.ConditionResourceConflict && cond.Status == metav1.ConditionTrue {
			found = true
			break
		}
	}
	if !found {
		t.Error("missing ResourceConflict condition")
	}
}
// --- Test: Reconcile idempotency ---
// Running reconcile repeatedly must converge without errors and leave the
// sub-resources in place (create-or-update, never duplicate/fail).
func TestReconcile_Idempotent(t *testing.T) {
	cluster := csiOnlyCluster()
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	// Run 3 reconciles
	for i := 0; i < 3; i++ {
		reconcile(t, r, "test-block", "default")
	}
	// Should have exactly 1 CSI controller deployment
	ctx := context.Background()
	var dep appsv1.Deployment
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block-csi-controller", Namespace: "kube-system"}, &dep); err != nil {
		t.Errorf("CSI controller missing after idempotent reconciles: %v", err)
	}
}
// --- Test: Secret rotation via annotation ---
// Setting the rotate-secret annotation to a timestamp newer than the last
// rotation must regenerate the CHAP password on the next reconcile.
func TestReconcile_SecretRotation(t *testing.T) {
	cluster := csiOnlyCluster()
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	// Create resources
	reconcile(t, r, "test-block", "default")
	reconcile(t, r, "test-block", "default")
	// Get original password
	ctx := context.Background()
	var secret corev1.Secret
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block-chap", Namespace: "kube-system"}, &secret); err != nil {
		t.Fatalf("get secret: %v", err)
	}
	originalPassword := string(secret.Data["password"])
	// Add rotation annotation (far-future timestamp guarantees "newer")
	var updated blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block", Namespace: "default"}, &updated); err != nil {
		t.Fatal(err)
	}
	if updated.Annotations == nil {
		updated.Annotations = make(map[string]string)
	}
	updated.Annotations[blockv1alpha1.AnnotationRotateSecret] = "2099-01-01T00:00:00Z"
	if err := c.Update(ctx, &updated); err != nil {
		t.Fatal(err)
	}
	// Reconcile again
	reconcile(t, r, "test-block", "default")
	// Check password changed
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block-chap", Namespace: "kube-system"}, &secret); err != nil {
		t.Fatal(err)
	}
	newPassword := string(secret.Data["password"])
	if newPassword == originalPassword {
		t.Error("password should have changed after rotation")
	}
}
// --- Test: User-provided secret ref skips auto-generation ---
// When spec.auth.secretRef names a user-managed secret, the operator must
// not create its own CHAP secret.
func TestReconcile_UserProvidedSecret_SkipsAutoGen(t *testing.T) {
	cluster := csiOnlyCluster()
	cluster.Spec.Auth = &blockv1alpha1.AuthSpec{
		SecretRef: &corev1.LocalObjectReference{Name: "my-custom-secret"},
	}
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "test-block", "default")
	reconcile(t, r, "test-block", "default")
	// Auto-generated secret should NOT exist
	var secret corev1.Secret
	err := c.Get(context.Background(), types.NamespacedName{Name: "test-block-chap", Namespace: "kube-system"}, &secret)
	if !apierrors.IsNotFound(err) {
		t.Error("auto-generated secret should not exist when secretRef is provided")
	}
}
// --- Test: Full-stack mode creates master + volume StatefulSets ---
// A spec with a managed Master must produce master/volume Services and
// StatefulSets in the CR's namespace, plus the usual CSI resources.
func TestReconcile_FullStack_CreatesMasterAndVolume(t *testing.T) {
	cluster := fullStackCluster()
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "test-full", "default")
	reconcile(t, r, "test-full", "default")
	ctx := context.Background()
	// Master Service
	var masterSvc corev1.Service
	if err := c.Get(ctx, types.NamespacedName{Name: "test-full-master", Namespace: "default"}, &masterSvc); err != nil {
		t.Errorf("master service not created: %v", err)
	}
	// Master StatefulSet
	var masterSts appsv1.StatefulSet
	if err := c.Get(ctx, types.NamespacedName{Name: "test-full-master", Namespace: "default"}, &masterSts); err != nil {
		t.Errorf("master StatefulSet not created: %v", err)
	}
	// Volume Service
	var volSvc corev1.Service
	if err := c.Get(ctx, types.NamespacedName{Name: "test-full-volume", Namespace: "default"}, &volSvc); err != nil {
		t.Errorf("volume service not created: %v", err)
	}
	// Volume StatefulSet
	var volSts appsv1.StatefulSet
	if err := c.Get(ctx, types.NamespacedName{Name: "test-full-volume", Namespace: "default"}, &volSts); err != nil {
		t.Errorf("volume StatefulSet not created: %v", err)
	}
	// CSI resources also created
	var dep appsv1.Deployment
	if err := c.Get(ctx, types.NamespacedName{Name: "test-full-csi-controller", Namespace: "kube-system"}, &dep); err != nil {
		t.Errorf("CSI controller not created in full-stack mode: %v", err)
	}
}
// --- Test: Volume without master → Failed ---
// A managed Volume requires a managed Master; Volume + MasterRef alone is
// invalid and must fail validation.
func TestReconcile_VolumeWithoutMaster_Failed(t *testing.T) {
	cluster := &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "vol-only", Namespace: "default", UID: "uid-vol",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{
			MasterRef: &blockv1alpha1.MasterRef{Address: "m:9333"},
			Volume:    &blockv1alpha1.VolumeSpec{},
		},
	}
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "vol-only", "default")
	reconcile(t, r, "vol-only", "default")
	var updated blockv1alpha1.SeaweedBlockCluster
	_ = c.Get(context.Background(), types.NamespacedName{Name: "vol-only", Namespace: "default"}, &updated)
	if updated.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Errorf("phase = %q, want Failed", updated.Status.Phase)
	}
}
// --- Test: Defaults are applied ---
// applyDefaults on an empty spec must fill in image, CSI image, storage
// class name, CSI namespace, and a CSI section with one controller replica.
func TestApplyDefaults(t *testing.T) {
	spec := &blockv1alpha1.SeaweedBlockClusterSpec{}
	applyDefaults(spec)
	if spec.Image != blockv1alpha1.DefaultImage {
		t.Errorf("image = %q", spec.Image)
	}
	if spec.CSIImage != blockv1alpha1.DefaultCSIImage {
		t.Errorf("csiImage = %q", spec.CSIImage)
	}
	if spec.StorageClassName != blockv1alpha1.DefaultStorageClassName {
		t.Errorf("storageClassName = %q", spec.StorageClassName)
	}
	if spec.CSINamespace != blockv1alpha1.DefaultCSINamespace {
		t.Errorf("csiNamespace = %q", spec.CSINamespace)
	}
	if spec.CSI == nil {
		t.Fatal("CSI should be initialized")
	}
	if spec.CSI.ControllerReplicas == nil || *spec.CSI.ControllerReplicas != 1 {
		t.Error("CSI controller replicas should default to 1")
	}
}
// TestApplyDefaults_MasterAndVolume checks that empty Master/Volume
// sections receive their per-component defaults (replicas, ports, dirs).
func TestApplyDefaults_MasterAndVolume(t *testing.T) {
	spec := &blockv1alpha1.SeaweedBlockClusterSpec{
		Master: &blockv1alpha1.MasterSpec{},
		Volume: &blockv1alpha1.VolumeSpec{},
	}
	applyDefaults(spec)
	if spec.Master.Replicas == nil || *spec.Master.Replicas != 1 {
		t.Error("master replicas should default to 1")
	}
	if spec.Master.Port != blockv1alpha1.DefaultMasterPort {
		t.Errorf("master port = %d", spec.Master.Port)
	}
	if spec.Volume.Port != blockv1alpha1.DefaultVolumePort {
		t.Errorf("volume port = %d", spec.Volume.Port)
	}
	if spec.Volume.BlockDir != blockv1alpha1.DefaultBlockDir {
		t.Errorf("blockDir = %q", spec.Volume.BlockDir)
	}
	if spec.Volume.BlockListenPort != blockv1alpha1.DefaultBlockListenPort {
		t.Errorf("blockListenPort = %d", spec.Volume.BlockListenPort)
	}
}
// --- Test: Validation rules ---
// Table-driven coverage of validate(): mutual exclusivity, required
// master source, volume-needs-master, deferred HA, and masterRef address.
// An empty wantErr means the spec is expected to be valid.
func TestValidate(t *testing.T) {
	tests := []struct {
		name    string
		spec    blockv1alpha1.SeaweedBlockClusterSpec
		wantErr string
	}{
		{
			name: "both master and masterRef",
			spec: blockv1alpha1.SeaweedBlockClusterSpec{
				Master:    &blockv1alpha1.MasterSpec{},
				MasterRef: &blockv1alpha1.MasterRef{Address: "x:9333"},
			},
			wantErr: "mutually exclusive",
		},
		{
			name:    "neither master nor masterRef",
			spec:    blockv1alpha1.SeaweedBlockClusterSpec{},
			wantErr: "one of spec.master or spec.masterRef is required",
		},
		{
			name: "volume without master",
			spec: blockv1alpha1.SeaweedBlockClusterSpec{
				MasterRef: &blockv1alpha1.MasterRef{Address: "x:9333"},
				Volume:    &blockv1alpha1.VolumeSpec{},
			},
			wantErr: "spec.volume requires spec.master",
		},
		{
			name: "master replicas > 1",
			spec: func() blockv1alpha1.SeaweedBlockClusterSpec {
				r := int32(3)
				return blockv1alpha1.SeaweedBlockClusterSpec{
					Master: &blockv1alpha1.MasterSpec{Replicas: &r},
				}
			}(),
			wantErr: "deferred to Phase 9C",
		},
		{
			name: "valid CSI-only",
			spec: blockv1alpha1.SeaweedBlockClusterSpec{
				MasterRef: &blockv1alpha1.MasterRef{Address: "m:9333"},
			},
		},
		{
			name: "valid full-stack",
			spec: func() blockv1alpha1.SeaweedBlockClusterSpec {
				r := int32(1)
				return blockv1alpha1.SeaweedBlockClusterSpec{
					Master: &blockv1alpha1.MasterSpec{Replicas: &r},
				}
			}(),
		},
		{
			name: "empty masterRef address",
			spec: blockv1alpha1.SeaweedBlockClusterSpec{
				MasterRef: &blockv1alpha1.MasterRef{Address: ""},
			},
			wantErr: "must not be empty",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validate(&tt.spec)
			if tt.wantErr == "" {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				return
			}
			if err == nil {
				t.Errorf("expected error containing %q, got nil", tt.wantErr)
				return
			}
			if !containsString(err.Error(), tt.wantErr) {
				t.Errorf("error %q does not contain %q", err.Error(), tt.wantErr)
			}
		})
	}
}
// --- Test: Finalizer cleanup (deletion) ---
// After creating all sub-resources, cleanupOwnedResources must delete
// both the cluster-scoped objects and the cross-namespace CSI objects.
// The cleanup function is invoked directly because the fake client does
// not let a plain Update set DeletionTimestamp.
func TestReconcile_Deletion_CleansUpClusterScoped(t *testing.T) {
	cluster := csiOnlyCluster()
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	// Create resources
	reconcile(t, r, "test-block", "default")
	reconcile(t, r, "test-block", "default")
	ctx := context.Background()
	// Verify CSIDriver exists
	var csiDriver storagev1.CSIDriver
	if err := c.Get(ctx, types.NamespacedName{Name: blockv1alpha1.CSIDriverName}, &csiDriver); err != nil {
		t.Fatalf("CSIDriver should exist: %v", err)
	}
	// Mark for deletion (simulate kubectl delete)
	var latest blockv1alpha1.SeaweedBlockCluster
	if err := c.Get(ctx, types.NamespacedName{Name: "test-block", Namespace: "default"}, &latest); err != nil {
		t.Fatal(err)
	}
	now := metav1.Now()
	latest.DeletionTimestamp = &now
	// We can't directly set DeletionTimestamp on fake client via Update,
	// so we'll test the cleanup function directly
	if err := r.cleanupOwnedResources(ctx, &latest); err != nil {
		t.Fatalf("cleanup: %v", err)
	}
	// CSIDriver should be deleted
	err := c.Get(ctx, types.NamespacedName{Name: blockv1alpha1.CSIDriverName}, &csiDriver)
	if !apierrors.IsNotFound(err) {
		t.Error("CSIDriver should be deleted after cleanup")
	}
	// ClusterRole should be deleted
	var cr rbacv1.ClusterRole
	err = c.Get(ctx, types.NamespacedName{Name: resources.ClusterRoleName()}, &cr)
	if !apierrors.IsNotFound(err) {
		t.Error("ClusterRole should be deleted after cleanup")
	}
	// StorageClass should be deleted
	var sc storagev1.StorageClass
	err = c.Get(ctx, types.NamespacedName{Name: "sw-block"}, &sc)
	if !apierrors.IsNotFound(err) {
		t.Error("StorageClass should be deleted after cleanup")
	}
	// H1 fix: cross-namespace CSI resources should also be deleted
	var dep appsv1.Deployment
	err = c.Get(ctx, types.NamespacedName{Name: "test-block-csi-controller", Namespace: "kube-system"}, &dep)
	if !apierrors.IsNotFound(err) {
		t.Error("CSI controller Deployment should be deleted after cleanup")
	}
	var ds appsv1.DaemonSet
	err = c.Get(ctx, types.NamespacedName{Name: "test-block-csi-node", Namespace: "kube-system"}, &ds)
	if !apierrors.IsNotFound(err) {
		t.Error("CSI node DaemonSet should be deleted after cleanup")
	}
	var sa corev1.ServiceAccount
	err = c.Get(ctx, types.NamespacedName{Name: "sw-block-csi", Namespace: "kube-system"}, &sa)
	if !apierrors.IsNotFound(err) {
		t.Error("ServiceAccount should be deleted after cleanup")
	}
	var secret corev1.Secret
	err = c.Get(ctx, types.NamespacedName{Name: "test-block-chap", Namespace: "kube-system"}, &secret)
	if !apierrors.IsNotFound(err) {
		t.Error("CHAP Secret should be deleted after cleanup")
	}
}
// --- Test: StorageClass adoption ---
// With AdoptExistingStorageClass set, a pre-existing unlabeled
// StorageClass with our provisioner must be labeled as owned by this CR.
func TestReconcile_StorageClass_AdoptExisting(t *testing.T) {
	// Pre-existing SC without owner labels
	existingSC := &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "sw-block",
		},
		Provisioner: blockv1alpha1.CSIDriverName,
	}
	cluster := csiOnlyCluster()
	cluster.Spec.AdoptExistingStorageClass = true
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster, existingSC).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "test-block", "default")
	reconcile(t, r, "test-block", "default")
	// SC should now have owner labels
	var sc storagev1.StorageClass
	if err := c.Get(context.Background(), types.NamespacedName{Name: "sw-block"}, &sc); err != nil {
		t.Fatal(err)
	}
	if sc.Labels[blockv1alpha1.LabelOwnerName] != "test-block" {
		t.Error("StorageClass should be adopted with owner labels")
	}
}
// TestReconcile_StorageClass_OrphanWithoutAdopt_Failed verifies that an
// unlabeled pre-existing StorageClass is NOT silently taken over: without
// AdoptExistingStorageClass the reconcile must fail.
func TestReconcile_StorageClass_OrphanWithoutAdopt_Failed(t *testing.T) {
	existingSC := &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "sw-block",
		},
		Provisioner: blockv1alpha1.CSIDriverName,
	}
	cluster := csiOnlyCluster()
	// AdoptExistingStorageClass is false by default
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster, existingSC).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "test-block", "default")
	reconcile(t, r, "test-block", "default")
	var updated blockv1alpha1.SeaweedBlockCluster
	_ = c.Get(context.Background(), types.NamespacedName{Name: "test-block", Namespace: "default"}, &updated)
	if updated.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Errorf("phase = %q, want Failed for orphan SC without adopt", updated.Status.Phase)
	}
}
// --- Test: CR not found returns nil ---
// Reconciling a deleted/nonexistent CR must succeed without requeueing
// (IsNotFound is swallowed, per controller-runtime convention).
func TestReconcile_NotFound_NoError(t *testing.T) {
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	result, err := r.Reconcile(context.Background(), ctrl.Request{
		NamespacedName: types.NamespacedName{Name: "nonexistent", Namespace: "default"},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if result.Requeue {
		t.Error("should not requeue for missing CR")
	}
}
// --- Test: H2 — invalid storage size is caught by validation ---
// A storage size that resource.ParseQuantity rejects must fail the CR
// before any StatefulSet is built with a bogus PVC request.
func TestReconcile_InvalidStorageSize_Failed(t *testing.T) {
	replicas := int32(1)
	cluster := &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "bad-size", Namespace: "default", UID: "uid-bs",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{
			Master: &blockv1alpha1.MasterSpec{
				Replicas: &replicas,
				Storage:  &blockv1alpha1.StorageSpec{Size: "not-a-size"},
			},
		},
	}
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "bad-size", "default")
	reconcile(t, r, "bad-size", "default")
	var updated blockv1alpha1.SeaweedBlockCluster
	_ = c.Get(context.Background(), types.NamespacedName{Name: "bad-size", Namespace: "default"}, &updated)
	if updated.Status.Phase != blockv1alpha1.PhaseFailed {
		t.Errorf("phase = %q, want Failed for invalid storage size", updated.Status.Phase)
	}
}
// --- Test: M4 — validation uses ConditionValidationFailed, not ResourceConflict ---
// Spec validation errors and multi-CR conflicts are distinct failure
// modes and must surface under distinct condition types.
func TestReconcile_ValidationUsesCorrectCondition(t *testing.T) {
	cluster := &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "val-cond", Namespace: "default", UID: "uid-vc",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{}, // neither master nor masterRef
	}
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}
	reconcile(t, r, "val-cond", "default")
	reconcile(t, r, "val-cond", "default")
	var updated blockv1alpha1.SeaweedBlockCluster
	_ = c.Get(context.Background(), types.NamespacedName{Name: "val-cond", Namespace: "default"}, &updated)
	// Should use ValidationFailed, NOT ResourceConflict
	hasValidation := false
	hasConflict := false
	for _, cond := range updated.Status.Conditions {
		if cond.Type == blockv1alpha1.ConditionValidationFailed {
			hasValidation = true
		}
		if cond.Type == blockv1alpha1.ConditionResourceConflict {
			hasConflict = true
		}
	}
	if !hasValidation {
		t.Error("validation errors should use ConditionValidationFailed")
	}
	if hasConflict {
		t.Error("validation errors should NOT use ConditionResourceConflict")
	}
}
// --- Test: L3 — port validation ---
// Ports must be 0 (meaning "use default") or within 1..65535; negative
// and out-of-range values are rejected by validate().
func TestValidate_PortRange(t *testing.T) {
	replicas := int32(1)
	tests := []struct {
		name    string
		spec    blockv1alpha1.SeaweedBlockClusterSpec
		wantErr bool
	}{
		{
			name: "valid port",
			spec: blockv1alpha1.SeaweedBlockClusterSpec{
				Master: &blockv1alpha1.MasterSpec{Replicas: &replicas, Port: 9333},
			},
		},
		{
			name: "port 0 is valid (uses default)",
			spec: blockv1alpha1.SeaweedBlockClusterSpec{
				Master: &blockv1alpha1.MasterSpec{Replicas: &replicas},
			},
		},
		{
			name: "negative port",
			spec: blockv1alpha1.SeaweedBlockClusterSpec{
				Master: &blockv1alpha1.MasterSpec{Replicas: &replicas, Port: -1},
			},
			wantErr: true,
		},
		{
			name: "port too high",
			spec: blockv1alpha1.SeaweedBlockClusterSpec{
				Master: &blockv1alpha1.MasterSpec{Replicas: &replicas, Port: 70000},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validate(&tt.spec)
			if tt.wantErr && err == nil {
				t.Error("expected error")
			}
			if !tt.wantErr && err != nil {
				t.Errorf("unexpected error: %v", err)
			}
		})
	}
}
// --- Test: L1 — FQDN master address ---
func TestReconcile_FullStack_UsesFQDN(t *testing.T) {
	cluster := fullStackCluster()
	scheme := testScheme()
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(cluster).
		WithStatusSubresource(cluster).
		Build()
	r := &Reconciler{Client: c, Scheme: scheme}

	// Two reconcile passes, matching the other tests in this file.
	reconcile(t, r, "test-full", "default")
	reconcile(t, r, "test-full", "default")

	var got blockv1alpha1.SeaweedBlockCluster
	_ = c.Get(context.Background(), types.NamespacedName{Name: "test-full", Namespace: "default"}, &got)

	const want = "test-full-master.default.svc.cluster.local:9333"
	if got.Status.MasterAddress != want {
		t.Errorf("masterAddress = %q, want %q", got.Status.MasterAddress, want)
	}
}
// containsString reports whether substr occurs within s.
// It is a local stand-in for strings.Contains so this file stays free of
// extra imports; the empty string is a substring of every string.
func containsString(s, substr string) bool {
	// searchString already returns false when substr is longer than s,
	// so the previous explicit length guard was redundant.
	return searchString(s, substr)
}

// searchString scans s for substr by comparing every len(substr)-byte
// window of s; returns true on the first match.
func searchString(s, substr string) bool {
	for i := 0; i+len(substr) <= len(s); i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}

177
weed/storage/blockvol/operator/internal/controller/validate.go

@ -0,0 +1,177 @@
package controller
import (
"fmt"
"regexp"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// maxDerivedNameSuffix is the longest suffix appended to the CR name when
// deriving sub-resource names. Currently "-csi-controller" (15 chars); the
// constant is 16, leaving one character of slack.
const maxDerivedNameSuffix = 16

// maxCRNameLength = 63 (DNS label limit) - maxDerivedNameSuffix.
const maxCRNameLength = 63 - maxDerivedNameSuffix

// dns1123LabelRegex matches RFC 1123 DNS labels (lowercase alphanumeric + hyphens,
// must start and end with alphanumeric). Length is enforced separately in
// validateName, not by this pattern.
var dns1123LabelRegex = regexp.MustCompile(`^[a-z0-9]([a-z0-9-]*[a-z0-9])?$`)

// reservedMasterFlags are flag prefixes that the operator sets on the master
// container and must not be overridden via ExtraArgs. Each entry ends with "="
// so only the exact flag is caught (e.g. "-port=" does not reject "-portal=").
var reservedMasterFlags = []string{
	"-port=", "-mdir=", "-ip.bind=", "-peers=",
}

// reservedVolumeFlags are flag prefixes that the operator sets on the volume
// container and must not be overridden via ExtraArgs.
var reservedVolumeFlags = []string{
	"-port=", "-dir=", "-master=", "-ip.bind=",
	"-block.dir=", "-block.listen=", "-block.portal=",
}
// validateName checks the CR name for DNS compatibility and derived name length.
// BUG-QA-3 fix: reject names whose derived sub-resource names exceed 63 chars.
// BUG-QA-5 fix: reject names that aren't valid RFC 1123 DNS labels.
func validateName(name string) error {
	switch {
	case name == "":
		return fmt.Errorf("metadata.name must not be empty")
	case !dns1123LabelRegex.MatchString(name):
		return fmt.Errorf("metadata.name %q is not a valid DNS label (must match [a-z0-9]([a-z0-9-]*[a-z0-9])?)", name)
	case len(name) > maxCRNameLength:
		return fmt.Errorf("metadata.name %q is too long (%d chars); max is %d to keep derived resource names within the 63-char DNS label limit",
			name, len(name), maxCRNameLength)
	}
	return nil
}
// validate checks the spec for invalid field combinations and values.
//
// Checks run in a fixed order and the first failure is returned, so each
// reconcile surfaces exactly one error message. CR name validation is handled
// separately by validateName.
func validate(spec *blockv1alpha1.SeaweedBlockClusterSpec) error {
	// Mode selection: spec.master (full-stack) XOR spec.masterRef (CSI-only).
	if spec.Master != nil && spec.MasterRef != nil {
		return fmt.Errorf("spec.master and spec.masterRef are mutually exclusive")
	}
	if spec.Master == nil && spec.MasterRef == nil {
		return fmt.Errorf("one of spec.master or spec.masterRef is required")
	}
	if spec.Volume != nil && spec.Master == nil {
		return fmt.Errorf("spec.volume requires spec.master (full-stack mode)")
	}
	// BUG-QA-4 fix: separate error messages for replicas=0 vs replicas>1
	if spec.Master != nil && spec.Master.Replicas != nil {
		r := *spec.Master.Replicas
		if r < 1 {
			return fmt.Errorf("spec.master.replicas must be at least 1")
		}
		if r > 1 {
			return fmt.Errorf("master HA (replicas > 1) deferred to Phase 9C")
		}
	}
	if spec.MasterRef != nil && spec.MasterRef.Address == "" {
		return fmt.Errorf("spec.masterRef.address must not be empty")
	}
	// H2 fix: validate storage size before builders attempt to parse it.
	// The getter closures re-check the nil chain themselves, so they are safe
	// to call regardless of the hasStorage argument.
	if err := validateStorageSize("spec.master.storage.size", spec.Master != nil && spec.Master.Storage != nil, func() string {
		if spec.Master != nil && spec.Master.Storage != nil {
			return spec.Master.Storage.Size
		}
		return ""
	}); err != nil {
		return err
	}
	if err := validateStorageSize("spec.volume.storage.size", spec.Volume != nil && spec.Volume.Storage != nil, func() string {
		if spec.Volume != nil && spec.Volume.Storage != nil {
			return spec.Volume.Storage.Size
		}
		return ""
	}); err != nil {
		return err
	}
	// L3 fix: port range validation (0 means "use the default port").
	if err := validatePort("spec.master.port", spec.Master, func(m *blockv1alpha1.MasterSpec) int32 { return m.Port }); err != nil {
		return err
	}
	if err := validatePort("spec.master.grpcPort", spec.Master, func(m *blockv1alpha1.MasterSpec) int32 { return m.GRPCPort }); err != nil {
		return err
	}
	// Volume ports are checked inline with the same 0-means-default rule.
	if spec.Volume != nil {
		if spec.Volume.Port != 0 && (spec.Volume.Port < 1 || spec.Volume.Port > 65535) {
			return fmt.Errorf("spec.volume.port %d is out of range (1-65535)", spec.Volume.Port)
		}
		if spec.Volume.GRPCPort != 0 && (spec.Volume.GRPCPort < 1 || spec.Volume.GRPCPort > 65535) {
			return fmt.Errorf("spec.volume.grpcPort %d is out of range (1-65535)", spec.Volume.GRPCPort)
		}
		if spec.Volume.BlockListenPort != 0 && (spec.Volume.BlockListenPort < 1 || spec.Volume.BlockListenPort > 65535) {
			return fmt.Errorf("spec.volume.blockListenPort %d is out of range (1-65535)", spec.Volume.BlockListenPort)
		}
	}
	// BUG-QA-1 fix: reject ExtraArgs that override operator-managed flags
	if spec.Master != nil {
		if err := validateExtraArgs("spec.master.extraArgs", spec.Master.ExtraArgs, reservedMasterFlags); err != nil {
			return err
		}
	}
	if spec.Volume != nil {
		if err := validateExtraArgs("spec.volume.extraArgs", spec.Volume.ExtraArgs, reservedVolumeFlags); err != nil {
			return err
		}
	}
	return nil
}
// validateStorageSize validates a storage size string is parseable and positive.
// BUG-QA-7 fix: reject zero/negative sizes.
func validateStorageSize(fieldName string, hasStorage bool, getSize func() string) error {
	if !hasStorage {
		return nil
	}
	raw := getSize()
	if raw == "" {
		return nil
	}
	parsed, err := resource.ParseQuantity(raw)
	if err != nil {
		return fmt.Errorf("%s %q is invalid: %w", fieldName, raw, err)
	}
	// Anything below one byte — zero, negatives, fractional sub-byte sizes —
	// is rejected.
	minimum := resource.MustParse("1")
	if parsed.Cmp(minimum) < 0 {
		return fmt.Errorf("%s %q must be a positive value (e.g. \"1Gi\")", fieldName, raw)
	}
	return nil
}
// validatePort checks that the port read from obj via getPort is either
// unset (0, meaning "use the default") or inside the valid TCP range
// 1-65535. A nil obj is treated as valid.
func validatePort[T any](name string, obj *T, getPort func(*T) int32) error {
	if obj == nil {
		return nil
	}
	p := getPort(obj)
	if p == 0 {
		return nil
	}
	if p < 1 || p > 65535 {
		return fmt.Errorf("%s %d is out of range (1-65535)", name, p)
	}
	return nil
}
// validateExtraArgs rejects extra args that collide with a reserved flag.
// BUG-QA-1 fix: prevents users from silently overriding operator-set flags
// like -port, -mdir, -block.listen etc., which would break probes/portals.
//
// Each reserved entry is a "-flag=" prefix. Besides the literal prefix match,
// this also catches "--flag=..." (Go's flag package treats one and two dashes
// identically) and a bare "-flag"/"--flag" whose value would follow as a
// separate argument — both previously slipped past the prefix check.
func validateExtraArgs(fieldName string, args []string, reserved []string) error {
	for _, arg := range args {
		// Normalize a leading "--" to "-" before comparing.
		norm := arg
		if strings.HasPrefix(norm, "--") {
			norm = norm[1:]
		}
		for _, prefix := range reserved {
			bare := strings.TrimSuffix(prefix, "=")
			if strings.HasPrefix(norm, prefix) || norm == bare {
				return fmt.Errorf("%s: flag %q conflicts with operator-managed flag %q; use the corresponding spec field instead",
					fieldName, arg, bare)
			}
		}
	}
	return nil
}

110
weed/storage/blockvol/operator/internal/resources/csi_controller.go

@ -0,0 +1,110 @@
package resources
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// CSIControllerName returns the Deployment name for the CSI controller.
func CSIControllerName(cluster *blockv1alpha1.SeaweedBlockCluster) string {
	return cluster.Name + "-csi-controller"
}
// BuildCSIControllerDeployment constructs the CSI controller Deployment.
// Reference: csi/deploy/csi-controller.yaml
//
// masterAddr is the "host:port" written into the CSI container's -master
// flag; csiNS is the namespace the Deployment is created in, which may differ
// from the CR's namespace (e.g. kube-system).
func BuildCSIControllerDeployment(cluster *blockv1alpha1.SeaweedBlockCluster, masterAddr, csiNS string) *appsv1.Deployment {
	// Replica count and both images fall back to API defaults when unset.
	replicas := int32(blockv1alpha1.DefaultControllerReplicas)
	if cluster.Spec.CSI != nil && cluster.Spec.CSI.ControllerReplicas != nil {
		replicas = *cluster.Spec.CSI.ControllerReplicas
	}
	csiImage := cluster.Spec.CSIImage
	if csiImage == "" {
		csiImage = blockv1alpha1.DefaultCSIImage
	}
	provisionerImage := blockv1alpha1.DefaultProvisionerImage
	if cluster.Spec.CSI != nil && cluster.Spec.CSI.ProvisionerImage != "" {
		provisionerImage = cluster.Spec.CSI.ProvisionerImage
	}
	saName := ServiceAccountName()
	name := CSIControllerName(cluster)
	labels := ComponentLabels(cluster, "csi-controller")
	selectorLabels := SelectorLabels(cluster, "csi-controller")
	dep := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: csiNS,
			Labels:    labels,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: selectorLabels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					ServiceAccountName: saName,
					Containers: []corev1.Container{
						{
							// Main CSI plugin running in controller mode.
							Name:            "block-csi",
							Image:           csiImage,
							ImagePullPolicy: blockv1alpha1.DefaultImagePullPolicy,
							Args: []string{
								"-endpoint=unix:///csi/csi.sock",
								"-mode=controller",
								fmt.Sprintf("-master=%s", masterAddr),
								"-node-id=$(NODE_NAME)",
							},
							Env: []corev1.EnvVar{
								{
									// NODE_NAME is injected by kubelet via the downward API.
									Name: "NODE_NAME",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											FieldPath: "spec.nodeName",
										},
									},
								},
							},
							VolumeMounts: []corev1.VolumeMount{
								{Name: "socket-dir", MountPath: "/csi"},
							},
						},
						{
							// csi-provisioner sidecar; talks to the plugin over
							// the socket shared via the socket-dir emptyDir.
							Name:            "csi-provisioner",
							Image:           provisionerImage,
							ImagePullPolicy: blockv1alpha1.DefaultImagePullPolicy,
							Args: []string{
								"--csi-address=/csi/csi.sock",
								"--feature-gates=Topology=true",
							},
							VolumeMounts: []corev1.VolumeMount{
								{Name: "socket-dir", MountPath: "/csi"},
							},
						},
					},
					Volumes: []corev1.Volume{
						{
							Name: "socket-dir",
							VolumeSource: corev1.VolumeSource{
								EmptyDir: &corev1.EmptyDirVolumeSource{},
							},
						},
					},
				},
			},
		},
	}
	// CSI resources live in csiNamespace (typically kube-system), which differs from
	// the CR namespace. Cross-namespace ownerReferences are ignored by k8s GC, so we
	// use ownership labels + finalizer cleanup instead.
	SetCrossNamespaceOwnership(cluster, &dep.ObjectMeta)
	return dep
}

29
weed/storage/blockvol/operator/internal/resources/csi_driver.go

@ -0,0 +1,29 @@
package resources
import (
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// BuildCSIDriverResource constructs the CSIDriver cluster-scoped resource.
// Reference: csi/deploy/csi-driver.yaml
func BuildCSIDriverResource(cluster *blockv1alpha1.SeaweedBlockCluster) *storagev1.CSIDriver {
	// Both flags are explicitly false; locals are needed because the spec
	// fields are *bool.
	attach := false
	podInfo := false
	driver := &storagev1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{
			Name:   blockv1alpha1.CSIDriverName,
			Labels: ClusterScopedLabels(cluster, "csi-driver"),
		},
		Spec: storagev1.CSIDriverSpec{
			AttachRequired: &attach,
			PodInfoOnMount: &podInfo,
			VolumeLifecycleModes: []storagev1.VolumeLifecycleMode{
				storagev1.VolumeLifecyclePersistent,
			},
		},
	}
	return driver
}

156
weed/storage/blockvol/operator/internal/resources/csi_node.go

@ -0,0 +1,156 @@
package resources
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// CSINodeName returns the DaemonSet name for the CSI node plugin.
func CSINodeName(cluster *blockv1alpha1.SeaweedBlockCluster) string {
	return cluster.Name + "-csi-node"
}
// BuildCSINodeDaemonSet constructs the CSI node DaemonSet.
// Reference: csi/deploy/csi-node.yaml
//
// csiNS is the namespace the DaemonSet is created in; it typically differs
// from the CR's namespace, so ownership is tracked with labels instead of an
// ownerReference (see SetCrossNamespaceOwnership).
func BuildCSINodeDaemonSet(cluster *blockv1alpha1.SeaweedBlockCluster, csiNS string) *appsv1.DaemonSet {
	csiImage := cluster.Spec.CSIImage
	if csiImage == "" {
		csiImage = blockv1alpha1.DefaultCSIImage
	}
	registrarImage := blockv1alpha1.DefaultRegistrarImage
	if cluster.Spec.CSI != nil && cluster.Spec.CSI.RegistrarImage != "" {
		registrarImage = cluster.Spec.CSI.RegistrarImage
	}
	saName := ServiceAccountName()
	name := CSINodeName(cluster)
	labels := ComponentLabels(cluster, "csi-node")
	selectorLabels := SelectorLabels(cluster, "csi-node")
	// Addressable locals for the pointer-typed pod-spec fields below.
	hostPathDir := corev1.HostPathDirectory
	hostPathDirOrCreate := corev1.HostPathDirectoryOrCreate
	privileged := true
	bidirectional := corev1.MountPropagationBidirectional
	hostNetwork := true
	hostPID := true
	ds := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: csiNS,
			Labels:    labels,
		},
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: selectorLabels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					ServiceAccountName: saName,
					// Host networking and PID namespace, per the reference manifest.
					HostNetwork: hostNetwork,
					HostPID:     hostPID,
					Containers: []corev1.Container{
						{
							// CSI plugin in node mode; privileged for mount/device access.
							Name:            "block-csi",
							Image:           csiImage,
							ImagePullPolicy: blockv1alpha1.DefaultImagePullPolicy,
							Args: []string{
								"-endpoint=unix:///csi/csi.sock",
								"-mode=node",
								"-node-id=$(NODE_NAME)",
							},
							Env: []corev1.EnvVar{
								{
									// NODE_NAME is injected by kubelet via the downward API.
									Name: "NODE_NAME",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											FieldPath: "spec.nodeName",
										},
									},
								},
							},
							SecurityContext: &corev1.SecurityContext{
								Privileged: &privileged,
							},
							VolumeMounts: []corev1.VolumeMount{
								{Name: "socket-dir", MountPath: "/csi"},
								// Bidirectional so mounts made inside the container
								// propagate back to the host's kubelet dir.
								{Name: "kubelet-dir", MountPath: "/var/lib/kubelet", MountPropagation: &bidirectional},
								{Name: "dev", MountPath: "/dev"},
								{Name: "iscsi-dir", MountPath: "/etc/iscsi"},
							},
						},
						{
							// Registrar sidecar: announces the driver socket to kubelet.
							Name:            "csi-node-driver-registrar",
							Image:           registrarImage,
							ImagePullPolicy: blockv1alpha1.DefaultImagePullPolicy,
							Args: []string{
								"--csi-address=/csi/csi.sock",
								fmt.Sprintf("--kubelet-registration-path=/var/lib/kubelet/plugins/%s/csi.sock", blockv1alpha1.CSIDriverName),
							},
							VolumeMounts: []corev1.VolumeMount{
								{Name: "socket-dir", MountPath: "/csi"},
								{Name: "registration-dir", MountPath: "/registration"},
							},
						},
					},
					Volumes: []corev1.Volume{
						{
							// Driver socket dir on the host; created if missing.
							Name: "socket-dir",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: fmt.Sprintf("/var/lib/kubelet/plugins/%s", blockv1alpha1.CSIDriverName),
									Type: &hostPathDirOrCreate,
								},
							},
						},
						{
							Name: "kubelet-dir",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/var/lib/kubelet",
									Type: &hostPathDir,
								},
							},
						},
						{
							Name: "dev",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/dev",
									Type: &hostPathDir,
								},
							},
						},
						{
							Name: "iscsi-dir",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/etc/iscsi",
									Type: &hostPathDirOrCreate,
								},
							},
						},
						{
							Name: "registration-dir",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/var/lib/kubelet/plugins_registry",
									Type: &hostPathDir,
								},
							},
						},
					},
				},
			},
		},
	}
	// Cross-namespace: use labels + finalizer, not ownerRef.
	SetCrossNamespaceOwnership(cluster, &ds.ObjectMeta)
	return ds
}

88
weed/storage/blockvol/operator/internal/resources/labels.go

@ -0,0 +1,88 @@
package resources
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// Standard app.kubernetes.io label keys applied to managed resources, plus
// the identity recorded in the managed-by label.
const (
	labelApp       = "app.kubernetes.io/name"
	labelInstance  = "app.kubernetes.io/instance"
	labelComponent = "app.kubernetes.io/component"
	labelManagedBy = "app.kubernetes.io/managed-by"
	managerName    = "sw-block-operator"
)
// CommonLabels returns the base label set for all operator-managed resources.
func CommonLabels(cluster *blockv1alpha1.SeaweedBlockCluster) map[string]string {
	base := make(map[string]string, 3)
	base[labelApp] = "sw-block"
	base[labelInstance] = cluster.Name
	base[labelManagedBy] = managerName
	return base
}
// ComponentLabels returns labels for a specific component (e.g. "csi-controller", "master").
func ComponentLabels(cluster *blockv1alpha1.SeaweedBlockCluster, component string) map[string]string {
	result := CommonLabels(cluster)
	result[labelComponent] = component
	return result
}
// SelectorLabels returns the minimal label set used in matchLabels.
// Kept separate from ComponentLabels because selectors are immutable once set.
func SelectorLabels(cluster *blockv1alpha1.SeaweedBlockCluster, component string) map[string]string {
	selector := make(map[string]string, 3)
	selector[labelApp] = "sw-block"
	selector[labelInstance] = cluster.Name
	selector[labelComponent] = component
	return selector
}
// OwnerLabels returns labels that identify the owning CR for cluster-scoped resources.
func OwnerLabels(cluster *blockv1alpha1.SeaweedBlockCluster) map[string]string {
	owner := make(map[string]string, 2)
	owner[blockv1alpha1.LabelOwnerNamespace] = cluster.Namespace
	owner[blockv1alpha1.LabelOwnerName] = cluster.Name
	return owner
}
// ClusterScopedLabels returns the full label set for cluster-scoped resources
// (or cross-namespace resources that cannot use ownerReference).
func ClusterScopedLabels(cluster *blockv1alpha1.SeaweedBlockCluster, component string) map[string]string {
	merged := ComponentLabels(cluster, component)
	merged[blockv1alpha1.LabelOwnerNamespace] = cluster.Namespace
	merged[blockv1alpha1.LabelOwnerName] = cluster.Name
	return merged
}
// SetOwnerReference sets the ownerReference for same-namespace resources only.
// Cross-namespace resources must use ClusterScopedLabels + finalizer cleanup instead,
// because Kubernetes GC ignores cross-namespace ownerReferences.
func SetOwnerReference(cluster *blockv1alpha1.SeaweedBlockCluster, obj metav1.Object) {
	isController := true
	ref := metav1.OwnerReference{
		APIVersion:         blockv1alpha1.GroupVersion.String(),
		Kind:               "SeaweedBlockCluster",
		Name:               cluster.Name,
		UID:                cluster.UID,
		Controller:         &isController,
		BlockOwnerDeletion: &isController,
	}
	obj.SetOwnerReferences([]metav1.OwnerReference{ref})
}
// SetCrossNamespaceOwnership sets ownership labels (not ownerRef) on resources that
// live in a different namespace than the CR. These are cleaned up by the finalizer.
func SetCrossNamespaceOwnership(cluster *blockv1alpha1.SeaweedBlockCluster, obj metav1.Object) {
	existing := obj.GetLabels()
	if existing == nil {
		existing = map[string]string{}
	}
	existing[blockv1alpha1.LabelOwnerNamespace] = cluster.Namespace
	existing[blockv1alpha1.LabelOwnerName] = cluster.Name
	obj.SetLabels(existing)
}

168
weed/storage/blockvol/operator/internal/resources/master.go

@ -0,0 +1,168 @@
package resources
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// MasterServiceName returns the headless Service name for the master.
func MasterServiceName(cluster *blockv1alpha1.SeaweedBlockCluster) string {
	return cluster.Name + "-master"
}
// BuildMasterService constructs the headless Service for master StatefulSet.
//
// The Service is headless (ClusterIP: None) and publishes not-ready
// addresses, so per-pod DNS records exist before pods pass readiness. HTTP
// and gRPC ports fall back to the API default constants when unset.
func BuildMasterService(cluster *blockv1alpha1.SeaweedBlockCluster) *corev1.Service {
	port := cluster.Spec.Master.Port
	if port == 0 {
		port = blockv1alpha1.DefaultMasterPort
	}
	grpcPort := cluster.Spec.Master.GRPCPort
	if grpcPort == 0 {
		grpcPort = blockv1alpha1.DefaultMasterGRPCPort
	}
	name := MasterServiceName(cluster)
	labels := ComponentLabels(cluster, "master")
	selectorLabels := SelectorLabels(cluster, "master")
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: cluster.Namespace,
			Labels:    labels,
		},
		Spec: corev1.ServiceSpec{
			ClusterIP:                "None",
			Selector:                 selectorLabels,
			PublishNotReadyAddresses: true,
			Ports: []corev1.ServicePort{
				{Name: "http", Port: port, TargetPort: intstr.FromInt32(port)},
				{Name: "grpc", Port: grpcPort, TargetPort: intstr.FromInt32(grpcPort)},
			},
		},
	}
	// Same-namespace resource, so a normal ownerReference handles GC.
	SetOwnerReference(cluster, &svc.ObjectMeta)
	return svc
}
// BuildMasterStatefulSet constructs the master StatefulSet.
//
// The data directory is backed by a volumeClaimTemplate when
// spec.master.storage.size is set, otherwise by an emptyDir.
//
// Fix: the "grpc" container port previously used ms.GRPCPort verbatim, which
// produced an invalid containerPort of 0 whenever the field was unset and did
// not match the defaulted targetPort in BuildMasterService. It now falls back
// to DefaultMasterGRPCPort exactly like the Service does.
func BuildMasterStatefulSet(cluster *blockv1alpha1.SeaweedBlockCluster) *appsv1.StatefulSet {
	ms := cluster.Spec.Master
	replicas := int32(1)
	if ms.Replicas != nil {
		replicas = *ms.Replicas
	}
	port := ms.Port
	if port == 0 {
		port = blockv1alpha1.DefaultMasterPort
	}
	// Default the gRPC port the same way BuildMasterService does, so the
	// declared container port is valid (non-zero) and matches the Service.
	grpcPort := ms.GRPCPort
	if grpcPort == 0 {
		grpcPort = blockv1alpha1.DefaultMasterGRPCPort
	}
	name := MasterServiceName(cluster)
	labels := ComponentLabels(cluster, "master")
	selectorLabels := SelectorLabels(cluster, "master")
	// Build peers string for single-replica: "<name>-master-0.<name>-master.<ns>:<port>"
	peers := fmt.Sprintf("%s-0.%s.%s:%d", name, name, cluster.Namespace, port)
	args := []string{
		"master",
		fmt.Sprintf("-port=%d", port),
		"-mdir=/data",
		"-ip.bind=0.0.0.0",
		fmt.Sprintf("-peers=%s", peers),
	}
	// ExtraArgs are validated elsewhere to not override the flags above.
	args = append(args, ms.ExtraArgs...)
	image := cluster.Spec.Image
	if image == "" {
		image = blockv1alpha1.DefaultImage
	}
	sts := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: cluster.Namespace,
			Labels:    labels,
		},
		Spec: appsv1.StatefulSetSpec{
			ServiceName: name,
			Replicas:    &replicas,
			Selector:    &metav1.LabelSelector{MatchLabels: selectorLabels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:            "master",
							Image:           image,
							ImagePullPolicy: blockv1alpha1.DefaultImagePullPolicy,
							Command:         []string{"/usr/bin/weed"},
							Args:            args,
							Ports: []corev1.ContainerPort{
								{Name: "http", ContainerPort: port},
								{Name: "grpc", ContainerPort: grpcPort},
							},
							VolumeMounts: []corev1.VolumeMount{
								{Name: "data", MountPath: "/data"},
							},
							ReadinessProbe: &corev1.Probe{
								ProbeHandler: corev1.ProbeHandler{
									HTTPGet: &corev1.HTTPGetAction{
										Path: "/cluster/status",
										Port: intstr.FromInt32(port),
									},
								},
								InitialDelaySeconds: 10,
								PeriodSeconds:       15,
							},
							Resources: ms.Resources,
						},
					},
				},
			},
		},
	}
	// VolumeClaimTemplate for data
	if ms.Storage != nil && ms.Storage.Size != "" {
		pvcSpec := corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse(ms.Storage.Size), // safe: validated by controller
				},
			},
		}
		// M3 fix: wire StorageClassName into PVC
		if ms.Storage.StorageClassName != nil {
			pvcSpec.StorageClassName = ms.Storage.StorageClassName
		}
		sts.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{
			{
				ObjectMeta: metav1.ObjectMeta{Name: "data"},
				Spec: pvcSpec,
			},
		}
	} else {
		// Use emptyDir if no storage specified
		sts.Spec.Template.Spec.Volumes = []corev1.Volume{
			{
				Name: "data",
				VolumeSource: corev1.VolumeSource{
					EmptyDir: &corev1.EmptyDirVolumeSource{},
				},
			},
		}
	}
	SetOwnerReference(cluster, &sts.ObjectMeta)
	return sts
}

62
weed/storage/blockvol/operator/internal/resources/ownership.go

@ -0,0 +1,62 @@
package resources
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// OwnershipStatus represents the result of checking cluster-scoped resource ownership.
// The zero value is OwnershipFree.
type OwnershipStatus int

const (
	// OwnershipFree means the resource does not exist.
	OwnershipFree OwnershipStatus = iota
	// OwnershipOwned means the resource exists and is owned by this CR.
	OwnershipOwned
	// OwnershipConflict means the resource exists but is owned by a different CR.
	OwnershipConflict
	// OwnershipOrphan means the resource exists but has no ownership labels.
	OwnershipOrphan
)
// CheckOwnership determines if a cluster-scoped resource is owned by the given CR.
func CheckOwnership(obj metav1.Object, cluster *blockv1alpha1.SeaweedBlockCluster) OwnershipStatus {
	resourceLabels := obj.GetLabels()
	if resourceLabels == nil {
		return OwnershipOrphan
	}
	ownerNS, nsPresent := resourceLabels[blockv1alpha1.LabelOwnerNamespace]
	ownerName, namePresent := resourceLabels[blockv1alpha1.LabelOwnerName]
	// Only when BOTH labels are absent is the resource an orphan; a partial
	// label set falls through and is reported as a conflict.
	if !nsPresent && !namePresent {
		return OwnershipOrphan
	}
	if ownerNS == cluster.Namespace && ownerName == cluster.Name {
		return OwnershipOwned
	}
	return OwnershipConflict
}
// ConflictOwner returns a human-readable "<namespace>/<name>" string identifying the
// conflicting owner of a cluster-scoped resource.
func ConflictOwner(obj metav1.Object) string {
	ownerLabels := obj.GetLabels()
	if ownerLabels == nil {
		return "unknown"
	}
	// Missing keys yield empty strings, producing e.g. "/name" or "/".
	return fmt.Sprintf("%s/%s",
		ownerLabels[blockv1alpha1.LabelOwnerNamespace],
		ownerLabels[blockv1alpha1.LabelOwnerName])
}
// IsOwnedBy returns true if the cluster-scoped resource's owner labels match the given CR.
func IsOwnedBy(obj metav1.Object, cluster *blockv1alpha1.SeaweedBlockCluster) bool {
	status := CheckOwnership(obj, cluster)
	return status == OwnershipOwned
}

110
weed/storage/blockvol/operator/internal/resources/rbac.go

@ -0,0 +1,110 @@
package resources
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// Fixed, CR-independent names for the CSI RBAC objects. Because they are not
// derived from the CR name, multiple CRs share (and can conflict on) the same
// objects; ownership labels record which CR created them.
const (
	clusterRoleName    = "sw-block-csi"
	clusterRoleBinding = "sw-block-csi"
	serviceAccountName = "sw-block-csi"
)

// ServiceAccountName returns the fixed CSI service account name.
func ServiceAccountName() string { return serviceAccountName }

// ClusterRoleName returns the fixed CSI ClusterRole name.
func ClusterRoleName() string { return clusterRoleName }

// ClusterRoleBindingName returns the fixed CSI ClusterRoleBinding name.
func ClusterRoleBindingName() string { return clusterRoleBinding }
// BuildServiceAccount constructs the CSI ServiceAccount.
// Reference: csi/deploy/rbac.yaml
func BuildServiceAccount(cluster *blockv1alpha1.SeaweedBlockCluster, csiNS string) *corev1.ServiceAccount {
	account := &corev1.ServiceAccount{}
	account.Name = serviceAccountName
	account.Namespace = csiNS
	account.Labels = ComponentLabels(cluster, "csi-rbac")
	// Cross-namespace: use labels + finalizer, not ownerRef.
	SetCrossNamespaceOwnership(cluster, &account.ObjectMeta)
	return account
}
// BuildClusterRole constructs the CSI ClusterRole.
// Reference: csi/deploy/rbac.yaml — the rule set mirrors that manifest.
func BuildClusterRole(cluster *blockv1alpha1.SeaweedBlockCluster) *rbacv1.ClusterRole {
	return &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name:   clusterRoleName,
			Labels: ClusterScopedLabels(cluster, "csi-rbac"),
		},
		Rules: []rbacv1.PolicyRule{
			{
				// Full PV lifecycle (create/delete) plus read access.
				APIGroups: []string{""},
				Resources: []string{"persistentvolumes"},
				Verbs:     []string{"get", "list", "watch", "create", "delete"},
			},
			{
				APIGroups: []string{""},
				Resources: []string{"persistentvolumeclaims"},
				Verbs:     []string{"get", "list", "watch", "update"},
			},
			{
				APIGroups: []string{"storage.k8s.io"},
				Resources: []string{"storageclasses"},
				Verbs:     []string{"get", "list", "watch"},
			},
			{
				APIGroups: []string{""},
				Resources: []string{"events"},
				Verbs:     []string{"list", "watch", "create", "update", "patch"},
			},
			{
				APIGroups: []string{"storage.k8s.io"},
				Resources: []string{"csinodes"},
				Verbs:     []string{"get", "list", "watch"},
			},
			{
				APIGroups: []string{""},
				Resources: []string{"nodes"},
				Verbs:     []string{"get", "list", "watch"},
			},
			{
				APIGroups: []string{"storage.k8s.io"},
				Resources: []string{"volumeattachments"},
				Verbs:     []string{"get", "list", "watch"},
			},
		},
	}
}
// BuildClusterRoleBinding constructs the CSI ClusterRoleBinding.
// Reference: csi/deploy/rbac.yaml
//
// Binds the fixed sw-block-csi ClusterRole to the CSI ServiceAccount in
// csiNS; ownership labels record which CR created it.
func BuildClusterRoleBinding(cluster *blockv1alpha1.SeaweedBlockCluster, csiNS string) *rbacv1.ClusterRoleBinding {
	return &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:   clusterRoleBinding,
			Labels: ClusterScopedLabels(cluster, "csi-rbac"),
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      serviceAccountName,
				Namespace: csiNS,
			},
		},
		RoleRef: rbacv1.RoleRef{
			Kind:     "ClusterRole",
			Name:     clusterRoleName,
			APIGroup: "rbac.authorization.k8s.io",
		},
	}
}

976
weed/storage/blockvol/operator/internal/resources/resources_test.go

@ -0,0 +1,976 @@
package resources
import (
	"slices"
	"testing"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// --- Helpers ---

// testCluster returns a minimal CSI-only (masterRef) CR used as the base
// fixture for the builder tests; individual tests mutate fields as needed.
func testCluster() *blockv1alpha1.SeaweedBlockCluster {
	return &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-block",
			Namespace: "default",
			UID:       "test-uid-123",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{
			CSIImage: "sw-block-csi:local",
			MasterRef: &blockv1alpha1.MasterRef{
				Address: "seaweedfs-master.default:9333",
			},
			StorageClassName: "sw-block",
			CSINamespace:     "kube-system",
		},
	}
}
// findContainer returns a pointer to the named container, or nil if absent.
func findContainer(containers []corev1.Container, name string) *corev1.Container {
	for idx := range containers {
		if containers[idx].Name == name {
			return &containers[idx]
		}
	}
	return nil
}
// findVolume returns a pointer to the named volume, or nil if absent.
func findVolume(volumes []corev1.Volume, name string) *corev1.Volume {
	for idx := range volumes {
		if volumes[idx].Name == name {
			return &volumes[idx]
		}
	}
	return nil
}
// findVolumeMount returns a pointer to the named mount, or nil if absent.
func findVolumeMount(mounts []corev1.VolumeMount, name string) *corev1.VolumeMount {
	for idx := range mounts {
		if mounts[idx].Name == name {
			return &mounts[idx]
		}
	}
	return nil
}
// containsArg reports whether args contains target as an exact element.
// The hand-rolled scan is replaced by the stdlib slices.Contains.
func containsArg(args []string, target string) bool {
	return slices.Contains(args, target)
}
// --- CSI Controller Deployment Tests ---

// TestCSIControllerDeployment_GoldenConformance pins the generated Deployment
// against the reference manifest (csi/deploy/csi-controller.yaml): names,
// namespace, containers and their args, volumes, and the cross-namespace
// ownership labels used instead of an ownerReference.
func TestCSIControllerDeployment_GoldenConformance(t *testing.T) {
	cluster := testCluster()
	dep := BuildCSIControllerDeployment(cluster, "seaweedfs-master.default:9333", "kube-system")
	// Name and namespace match reference
	if dep.Name != "my-block-csi-controller" {
		t.Errorf("name = %q, want my-block-csi-controller", dep.Name)
	}
	if dep.Namespace != "kube-system" {
		t.Errorf("namespace = %q, want kube-system", dep.Namespace)
	}
	// Replicas
	if dep.Spec.Replicas == nil || *dep.Spec.Replicas != 1 {
		t.Error("replicas should be 1")
	}
	// Service account
	if dep.Spec.Template.Spec.ServiceAccountName != "sw-block-csi" {
		t.Errorf("serviceAccountName = %q, want sw-block-csi", dep.Spec.Template.Spec.ServiceAccountName)
	}
	// Container: block-csi
	csi := findContainer(dep.Spec.Template.Spec.Containers, "block-csi")
	if csi == nil {
		t.Fatal("missing block-csi container")
	}
	if csi.Image != "sw-block-csi:local" {
		t.Errorf("block-csi image = %q, want sw-block-csi:local", csi.Image)
	}
	if !containsArg(csi.Args, "-endpoint=unix:///csi/csi.sock") {
		t.Error("missing -endpoint arg")
	}
	if !containsArg(csi.Args, "-mode=controller") {
		t.Error("missing -mode=controller arg")
	}
	if !containsArg(csi.Args, "-master=seaweedfs-master.default:9333") {
		t.Error("missing -master arg")
	}
	if !containsArg(csi.Args, "-node-id=$(NODE_NAME)") {
		t.Error("missing -node-id arg")
	}
	// Container: csi-provisioner
	prov := findContainer(dep.Spec.Template.Spec.Containers, "csi-provisioner")
	if prov == nil {
		t.Fatal("missing csi-provisioner container")
	}
	if prov.Image != blockv1alpha1.DefaultProvisionerImage {
		t.Errorf("provisioner image = %q, want %s", prov.Image, blockv1alpha1.DefaultProvisionerImage)
	}
	if !containsArg(prov.Args, "--csi-address=/csi/csi.sock") {
		t.Error("missing --csi-address arg")
	}
	if !containsArg(prov.Args, "--feature-gates=Topology=true") {
		t.Error("missing --feature-gates arg")
	}
	// Volume: socket-dir (emptyDir)
	socketVol := findVolume(dep.Spec.Template.Spec.Volumes, "socket-dir")
	if socketVol == nil {
		t.Fatal("missing socket-dir volume")
	}
	if socketVol.EmptyDir == nil {
		t.Error("socket-dir should be emptyDir")
	}
	// Cross-namespace ownership: uses labels, not ownerRef
	if len(dep.OwnerReferences) != 0 {
		t.Errorf("CSI controller should NOT have ownerRef (cross-namespace); got %d", len(dep.OwnerReferences))
	}
	if dep.Labels[blockv1alpha1.LabelOwnerNamespace] != "default" {
		t.Error("missing owner-namespace label on CSI controller")
	}
	if dep.Labels[blockv1alpha1.LabelOwnerName] != "my-block" {
		t.Error("missing owner-name label on CSI controller")
	}
}
// TestCSIControllerDeployment_CustomImages verifies that user-supplied CSI and
// provisioner images, plus an explicit controller replica count, override the
// built-in defaults in the generated Deployment.
func TestCSIControllerDeployment_CustomImages(t *testing.T) {
	cluster := testCluster()
	replicas := int32(2)
	cluster.Spec.CSI = &blockv1alpha1.CSISpec{
		ControllerReplicas: &replicas,
		ProvisionerImage:   "custom-provisioner:v1",
	}
	cluster.Spec.CSIImage = "custom-csi:v2"
	dep := BuildCSIControllerDeployment(cluster, "master:9333", "kube-system")
	if *dep.Spec.Replicas != 2 {
		t.Errorf("replicas = %d, want 2", *dep.Spec.Replicas)
	}
	csi := findContainer(dep.Spec.Template.Spec.Containers, "block-csi")
	if csi.Image != "custom-csi:v2" {
		t.Errorf("csi image = %q, want custom-csi:v2", csi.Image)
	}
	prov := findContainer(dep.Spec.Template.Spec.Containers, "csi-provisioner")
	if prov.Image != "custom-provisioner:v1" {
		t.Errorf("provisioner image = %q, want custom-provisioner:v1", prov.Image)
	}
}
// --- CSI Node DaemonSet Tests ---
// TestCSINodeDaemonSet_GoldenConformance checks the CSI node DaemonSet against
// the reference manifest: host networking, privileged plugin container, mount
// propagation, registrar wiring, and hostPath volumes.
func TestCSINodeDaemonSet_GoldenConformance(t *testing.T) {
	cluster := testCluster()
	ds := BuildCSINodeDaemonSet(cluster, "kube-system")
	// Name and namespace
	if ds.Name != "my-block-csi-node" {
		t.Errorf("name = %q, want my-block-csi-node", ds.Name)
	}
	if ds.Namespace != "kube-system" {
		t.Errorf("namespace = %q, want kube-system", ds.Namespace)
	}
	spec := ds.Spec.Template.Spec
	// hostNetwork and hostPID per reference
	if !spec.HostNetwork {
		t.Error("hostNetwork should be true")
	}
	if !spec.HostPID {
		t.Error("hostPID should be true")
	}
	// block-csi container: privileged (required for mount/iSCSI syscalls)
	csi := findContainer(spec.Containers, "block-csi")
	if csi == nil {
		t.Fatal("missing block-csi container")
	}
	if csi.SecurityContext == nil || csi.SecurityContext.Privileged == nil || !*csi.SecurityContext.Privileged {
		t.Error("block-csi must be privileged")
	}
	// Volume mounts match reference
	for _, expected := range []string{"socket-dir", "kubelet-dir", "dev", "iscsi-dir"} {
		if findVolumeMount(csi.VolumeMounts, expected) == nil {
			t.Errorf("missing volume mount %q on block-csi", expected)
		}
	}
	// kubelet-dir mount propagation must be Bidirectional so pod mounts
	// performed inside the container are visible to kubelet.
	kubeletMount := findVolumeMount(csi.VolumeMounts, "kubelet-dir")
	if kubeletMount.MountPropagation == nil || *kubeletMount.MountPropagation != corev1.MountPropagationBidirectional {
		t.Error("kubelet-dir mount propagation should be Bidirectional")
	}
	// Registrar container
	reg := findContainer(spec.Containers, "csi-node-driver-registrar")
	if reg == nil {
		t.Fatal("missing csi-node-driver-registrar container")
	}
	expectedRegPath := "--kubelet-registration-path=/var/lib/kubelet/plugins/block.csi.seaweedfs.com/csi.sock"
	if !containsArg(reg.Args, expectedRegPath) {
		t.Errorf("missing registrar arg %q", expectedRegPath)
	}
	// Volumes: socket-dir should be hostPath DirectoryOrCreate
	socketVol := findVolume(spec.Volumes, "socket-dir")
	if socketVol == nil || socketVol.HostPath == nil {
		t.Fatal("socket-dir should be hostPath")
	}
	expectedPath := "/var/lib/kubelet/plugins/block.csi.seaweedfs.com"
	if socketVol.HostPath.Path != expectedPath {
		t.Errorf("socket-dir path = %q, want %q", socketVol.HostPath.Path, expectedPath)
	}
	// registration-dir volume
	regVol := findVolume(spec.Volumes, "registration-dir")
	if regVol == nil || regVol.HostPath == nil {
		t.Fatal("registration-dir should be hostPath")
	}
	if regVol.HostPath.Path != "/var/lib/kubelet/plugins_registry" {
		t.Errorf("registration-dir path = %q, want /var/lib/kubelet/plugins_registry", regVol.HostPath.Path)
	}
}
// --- CSI Driver Resource Tests ---
// TestCSIDriverResource_GoldenConformance checks the cluster-scoped CSIDriver
// object: attach and pod-info disabled, persistent-only lifecycle, and the
// label-based ownership markers used for cluster-scoped cleanup.
func TestCSIDriverResource_GoldenConformance(t *testing.T) {
	cluster := testCluster()
	drv := BuildCSIDriverResource(cluster)
	if drv.Name != blockv1alpha1.CSIDriverName {
		t.Errorf("name = %q, want %s", drv.Name, blockv1alpha1.CSIDriverName)
	}
	if drv.Spec.AttachRequired == nil || *drv.Spec.AttachRequired {
		t.Error("attachRequired should be false")
	}
	if drv.Spec.PodInfoOnMount == nil || *drv.Spec.PodInfoOnMount {
		t.Error("podInfoOnMount should be false")
	}
	if len(drv.Spec.VolumeLifecycleModes) != 1 || drv.Spec.VolumeLifecycleModes[0] != storagev1.VolumeLifecyclePersistent {
		t.Error("volumeLifecycleModes should be [Persistent]")
	}
	// Ownership labels
	if drv.Labels[blockv1alpha1.LabelOwnerNamespace] != "default" {
		t.Error("missing owner-namespace label")
	}
	if drv.Labels[blockv1alpha1.LabelOwnerName] != "my-block" {
		t.Error("missing owner-name label")
	}
}
// --- RBAC Tests ---
// TestRBAC_GoldenConformance checks the CSI ServiceAccount, ClusterRole, and
// ClusterRoleBinding against the reference RBAC manifest.
//
// Fix: the original only compared verb-list *lengths*, so a rule with the
// right number of wrong verbs would pass. We now verify each expected verb is
// actually present, in addition to the count check (which catches extras).
func TestRBAC_GoldenConformance(t *testing.T) {
	cluster := testCluster()
	// ServiceAccount
	sa := BuildServiceAccount(cluster, "kube-system")
	if sa.Name != "sw-block-csi" {
		t.Errorf("SA name = %q, want sw-block-csi", sa.Name)
	}
	if sa.Namespace != "kube-system" {
		t.Errorf("SA namespace = %q, want kube-system", sa.Namespace)
	}
	// ClusterRole
	cr := BuildClusterRole(cluster)
	if cr.Name != "sw-block-csi" {
		t.Errorf("ClusterRole name = %q, want sw-block-csi", cr.Name)
	}
	// Verify key rules match reference
	expectedResources := map[string][]string{
		"persistentvolumes":      {"get", "list", "watch", "create", "delete"},
		"persistentvolumeclaims": {"get", "list", "watch", "update"},
		"storageclasses":         {"get", "list", "watch"},
		"events":                 {"list", "watch", "create", "update", "patch"},
		"csinodes":               {"get", "list", "watch"},
		"nodes":                  {"get", "list", "watch"},
		"volumeattachments":      {"get", "list", "watch"},
	}
	for _, rule := range cr.Rules {
		for _, res := range rule.Resources {
			expected, ok := expectedResources[res]
			if !ok {
				continue
			}
			// Compare the actual verb set, not just its size.
			got := make(map[string]bool, len(rule.Verbs))
			for _, v := range rule.Verbs {
				got[v] = true
			}
			for _, want := range expected {
				if !got[want] {
					t.Errorf("resource %q: missing verb %q", res, want)
				}
			}
			// Length check still catches unexpected extra verbs.
			if len(rule.Verbs) != len(expected) {
				t.Errorf("resource %q: got %d verbs, want %d", res, len(rule.Verbs), len(expected))
			}
			delete(expectedResources, res)
		}
	}
	for res := range expectedResources {
		t.Errorf("missing RBAC rule for resource %q", res)
	}
	// ClusterRoleBinding
	crb := BuildClusterRoleBinding(cluster, "kube-system")
	if crb.Name != "sw-block-csi" {
		t.Errorf("CRB name = %q, want sw-block-csi", crb.Name)
	}
	if len(crb.Subjects) != 1 {
		t.Fatal("expected 1 subject")
	}
	if crb.Subjects[0].Name != "sw-block-csi" || crb.Subjects[0].Namespace != "kube-system" {
		t.Errorf("subject = %+v, want sw-block-csi in kube-system", crb.Subjects[0])
	}
	if crb.RoleRef.Name != "sw-block-csi" {
		t.Errorf("roleRef name = %q, want sw-block-csi", crb.RoleRef.Name)
	}
}
// --- StorageClass Tests ---
// TestStorageClass_GoldenConformance checks the generated StorageClass:
// default name, provisioner, late binding, Delete reclaim, ownership label.
func TestStorageClass_GoldenConformance(t *testing.T) {
	cluster := testCluster()
	sc := BuildStorageClass(cluster)
	if sc.Name != "sw-block" {
		t.Errorf("name = %q, want sw-block", sc.Name)
	}
	if sc.Provisioner != blockv1alpha1.CSIDriverName {
		t.Errorf("provisioner = %q, want %s", sc.Provisioner, blockv1alpha1.CSIDriverName)
	}
	if sc.VolumeBindingMode == nil || *sc.VolumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer {
		t.Error("volumeBindingMode should be WaitForFirstConsumer")
	}
	if sc.ReclaimPolicy == nil || *sc.ReclaimPolicy != corev1.PersistentVolumeReclaimDelete {
		t.Error("reclaimPolicy should be Delete")
	}
	// Ownership labels
	if sc.Labels[blockv1alpha1.LabelOwnerNamespace] != "default" {
		t.Error("missing owner-namespace label")
	}
}

// TestStorageClass_CustomName verifies Spec.StorageClassName overrides the
// default StorageClass name.
func TestStorageClass_CustomName(t *testing.T) {
	cluster := testCluster()
	cluster.Spec.StorageClassName = "custom-block"
	sc := BuildStorageClass(cluster)
	if sc.Name != "custom-block" {
		t.Errorf("name = %q, want custom-block", sc.Name)
	}
}
// --- Secret Tests ---
// TestCHAPSecret_Generated verifies the auto-generated CHAP secret: derived
// name, target namespace, fixed username, 32-byte hex-encoded password, and
// the last-rotated bookkeeping annotation.
func TestCHAPSecret_Generated(t *testing.T) {
	cluster := testCluster()
	secret, err := BuildCHAPSecret(cluster, "kube-system")
	if err != nil {
		t.Fatalf("BuildCHAPSecret: %v", err)
	}
	if secret.Name != "my-block-chap" {
		t.Errorf("name = %q, want my-block-chap", secret.Name)
	}
	if secret.Namespace != "kube-system" {
		t.Errorf("namespace = %q, want kube-system", secret.Namespace)
	}
	if len(secret.Data["password"]) == 0 {
		t.Error("password should not be empty")
	}
	if string(secret.Data["username"]) != "chap-user" {
		t.Errorf("username = %q, want chap-user", string(secret.Data["username"]))
	}
	// Password should be 64 hex chars (32 bytes)
	if len(secret.Data["password"]) != 64 {
		t.Errorf("password length = %d, want 64 hex chars", len(secret.Data["password"]))
	}
	// Last-rotated annotation should exist
	if _, ok := secret.Annotations[blockv1alpha1.AnnotationLastRotated]; !ok {
		t.Error("missing last-rotated annotation")
	}
}

// TestCHAPSecret_RotationDetection exercises NeedsRotation for the three
// cases: no rotate request on the CR, a request newer than the secret's
// last rotation, and a request that was already honored.
func TestCHAPSecret_RotationDetection(t *testing.T) {
	cluster := testCluster()
	// No rotate-secret annotation on the CR → no rotation needed
	// (the secret itself carries a fresh last-rotated timestamp).
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{
				blockv1alpha1.AnnotationLastRotated: time.Now().UTC().Format(time.RFC3339),
			},
		},
	}
	if NeedsRotation(cluster, secret) {
		t.Error("should not need rotation without annotation")
	}
	// Set rotation annotation in the future
	cluster.Annotations = map[string]string{
		blockv1alpha1.AnnotationRotateSecret: time.Now().Add(time.Hour).UTC().Format(time.RFC3339),
	}
	if !NeedsRotation(cluster, secret) {
		t.Error("should need rotation when annotation is newer")
	}
	// Set rotation annotation in the past
	cluster.Annotations[blockv1alpha1.AnnotationRotateSecret] = time.Now().Add(-2 * time.Hour).UTC().Format(time.RFC3339)
	if NeedsRotation(cluster, secret) {
		t.Error("should not need rotation when annotation is older")
	}
}

// TestCHAPSecret_Regeneration verifies RegenerateCHAPPassword replaces the
// stored password with a fresh 64-hex-char value.
func TestCHAPSecret_Regeneration(t *testing.T) {
	cluster := testCluster()
	secret, _ := BuildCHAPSecret(cluster, "kube-system")
	oldPassword := string(secret.Data["password"])
	if err := RegenerateCHAPPassword(secret); err != nil {
		t.Fatalf("RegenerateCHAPPassword: %v", err)
	}
	newPassword := string(secret.Data["password"])
	if newPassword == oldPassword {
		t.Error("password should change after regeneration")
	}
	if len(newPassword) != 64 {
		t.Errorf("new password length = %d, want 64", len(newPassword))
	}
}
// --- Ownership Tests ---
// TestOwnership_CheckVariants exercises CheckOwnership's three outcomes:
// owned by this CR, owned by a different CR (conflict), and unowned (orphan).
func TestOwnership_CheckVariants(t *testing.T) {
	cluster := testCluster()
	// Owned by this CR
	owned := &metav1.ObjectMeta{
		Labels: map[string]string{
			blockv1alpha1.LabelOwnerNamespace: "default",
			blockv1alpha1.LabelOwnerName:      "my-block",
		},
	}
	if CheckOwnership(owned, cluster) != OwnershipOwned {
		t.Error("should be OwnershipOwned")
	}
	// Owned by different CR
	conflict := &metav1.ObjectMeta{
		Labels: map[string]string{
			blockv1alpha1.LabelOwnerNamespace: "other-ns",
			blockv1alpha1.LabelOwnerName:      "other-block",
		},
	}
	if CheckOwnership(conflict, cluster) != OwnershipConflict {
		t.Error("should be OwnershipConflict")
	}
	// No labels
	orphan := &metav1.ObjectMeta{}
	if CheckOwnership(orphan, cluster) != OwnershipOrphan {
		t.Error("should be OwnershipOrphan")
	}
	// Labels present but no owner labels
	noOwner := &metav1.ObjectMeta{
		Labels: map[string]string{"foo": "bar"},
	}
	if CheckOwnership(noOwner, cluster) != OwnershipOrphan {
		t.Error("should be OwnershipOrphan for labels without owner keys")
	}
}

// TestConflictOwner verifies the "namespace/name" rendering of the owning CR
// read from an object's ownership labels.
func TestConflictOwner(t *testing.T) {
	obj := &metav1.ObjectMeta{
		Labels: map[string]string{
			blockv1alpha1.LabelOwnerNamespace: "prod",
			blockv1alpha1.LabelOwnerName:      "block-1",
		},
	}
	result := ConflictOwner(obj)
	if result != "prod/block-1" {
		t.Errorf("ConflictOwner = %q, want prod/block-1", result)
	}
}

// --- Labels Tests ---
// TestLabels_CommonAndComponent verifies the common, component, and selector
// label helpers emit the expected app/instance/managed-by/component keys.
func TestLabels_CommonAndComponent(t *testing.T) {
	cluster := testCluster()
	common := CommonLabels(cluster)
	if common[labelApp] != "sw-block" {
		t.Error("missing app label")
	}
	if common[labelInstance] != "my-block" {
		t.Error("missing instance label")
	}
	if common[labelManagedBy] != "sw-block-operator" {
		t.Error("missing managed-by label")
	}
	comp := ComponentLabels(cluster, "master")
	if comp[labelComponent] != "master" {
		t.Error("missing component label")
	}
	sel := SelectorLabels(cluster, "csi-node")
	if len(sel) != 3 {
		t.Errorf("selector labels count = %d, want 3", len(sel))
	}
}
// --- CSI Controller cross-NS ownership test ---
// TestCSIController_CrossNamespaceOwnership verifies the cross-namespace CSI
// controller Deployment carries ownership labels instead of an ownerRef.
func TestCSIController_CrossNamespaceOwnership(t *testing.T) {
	cluster := testCluster()
	dep := BuildCSIControllerDeployment(cluster, "master:9333", "kube-system")
	// Cross-namespace resources must NOT have ownerRef (k8s GC ignores them)
	if len(dep.OwnerReferences) != 0 {
		t.Errorf("cross-namespace resources should not have ownerRef, got %d", len(dep.OwnerReferences))
	}
	// Must have ownership labels for finalizer cleanup
	if dep.Labels[blockv1alpha1.LabelOwnerNamespace] != "default" {
		t.Error("missing owner-namespace label")
	}
	if dep.Labels[blockv1alpha1.LabelOwnerName] != "my-block" {
		t.Error("missing owner-name label")
	}
}

// --- Same-namespace resources DO get ownerRef ---
// TestMasterStatefulSet_HasOwnerRef verifies same-namespace resources get a
// real ownerRef pointing at the SeaweedBlockCluster CR.
func TestMasterStatefulSet_HasOwnerRef(t *testing.T) {
	cluster := fullStackCluster()
	sts := BuildMasterStatefulSet(cluster)
	if len(sts.OwnerReferences) != 1 {
		t.Fatalf("same-namespace resources should have ownerRef, got %d", len(sts.OwnerReferences))
	}
	if sts.OwnerReferences[0].Kind != "SeaweedBlockCluster" {
		t.Errorf("ownerRef kind = %q", sts.OwnerReferences[0].Kind)
	}
}

// --- CSI Node cross-NS ownership ---
// TestCSINode_CrossNamespaceOwnership: labels-only ownership for the node DaemonSet.
func TestCSINode_CrossNamespaceOwnership(t *testing.T) {
	cluster := testCluster()
	ds := BuildCSINodeDaemonSet(cluster, "kube-system")
	if len(ds.OwnerReferences) != 0 {
		t.Errorf("CSI node should not have ownerRef (cross-namespace), got %d", len(ds.OwnerReferences))
	}
	if ds.Labels[blockv1alpha1.LabelOwnerName] != "my-block" {
		t.Error("missing owner-name label on CSI node")
	}
}

// --- ServiceAccount cross-NS ownership ---
// TestServiceAccount_CrossNamespaceOwnership: labels-only ownership for the SA.
func TestServiceAccount_CrossNamespaceOwnership(t *testing.T) {
	cluster := testCluster()
	sa := BuildServiceAccount(cluster, "kube-system")
	if len(sa.OwnerReferences) != 0 {
		t.Errorf("SA should not have ownerRef (cross-namespace), got %d", len(sa.OwnerReferences))
	}
	if sa.Labels[blockv1alpha1.LabelOwnerName] != "my-block" {
		t.Error("missing owner-name label on SA")
	}
}

// --- CHAP Secret cross-NS ownership ---
// TestCHAPSecret_CrossNamespaceOwnership: labels-only ownership for the secret.
func TestCHAPSecret_CrossNamespaceOwnership(t *testing.T) {
	cluster := testCluster()
	secret, err := BuildCHAPSecret(cluster, "kube-system")
	if err != nil {
		t.Fatal(err)
	}
	if len(secret.OwnerReferences) != 0 {
		t.Errorf("CHAP secret should not have ownerRef (cross-namespace), got %d", len(secret.OwnerReferences))
	}
	if secret.Labels[blockv1alpha1.LabelOwnerName] != "my-block" {
		t.Error("missing owner-name label on CHAP secret")
	}
}

// --- Conformance: all builders produce labeled output ---
// TestAllBuilders_HaveLabels sweeps every builder and asserts the common
// app/managed-by labels are present on each produced object.
func TestAllBuilders_HaveLabels(t *testing.T) {
	cluster := testCluster()
	checks := []struct {
		name   string
		labels map[string]string
	}{
		{"CSIController", BuildCSIControllerDeployment(cluster, "m:9333", "kube-system").Labels},
		{"CSINode", BuildCSINodeDaemonSet(cluster, "kube-system").Labels},
		{"CSIDriver", BuildCSIDriverResource(cluster).Labels},
		{"ClusterRole", BuildClusterRole(cluster).Labels},
		{"CRB", BuildClusterRoleBinding(cluster, "kube-system").Labels},
		{"StorageClass", BuildStorageClass(cluster).Labels},
		{"ServiceAccount", BuildServiceAccount(cluster, "kube-system").Labels},
	}
	for _, c := range checks {
		t.Run(c.name, func(t *testing.T) {
			if c.labels[labelApp] != "sw-block" {
				t.Errorf("%s: missing app label", c.name)
			}
			if c.labels[labelManagedBy] != "sw-block-operator" {
				t.Errorf("%s: missing managed-by label", c.name)
			}
		})
	}
}

// --- Type assertion: CSI node must produce DaemonSet ---
func TestCSINode_IsDaemonSet(t *testing.T) {
	cluster := testCluster()
	ds := BuildCSINodeDaemonSet(cluster, "kube-system")
	var _ *appsv1.DaemonSet = ds // compile-time type check
}

// TestCSIController_IsDeployment pins the controller builder's return type.
func TestCSIController_IsDeployment(t *testing.T) {
	cluster := testCluster()
	dep := BuildCSIControllerDeployment(cluster, "m:9333", "kube-system")
	var _ *appsv1.Deployment = dep // compile-time type check
}
// --- Full-Stack: Master Builder Tests ---
// fullStackCluster returns a CR fixture with both Master and Volume specs
// populated, exercising full-stack mode (operator-managed master + volume
// StatefulSets) as opposed to the CSI-only fixture from testCluster().
func fullStackCluster() *blockv1alpha1.SeaweedBlockCluster {
	replicas := int32(1)
	return &blockv1alpha1.SeaweedBlockCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-block",
			Namespace: "default",
			UID:       "test-uid-456",
		},
		Spec: blockv1alpha1.SeaweedBlockClusterSpec{
			Image:    "chrislusf/seaweedfs:latest",
			CSIImage: "sw-block-csi:local",
			Master: &blockv1alpha1.MasterSpec{
				Replicas: &replicas,
				Port:     9333,
				GRPCPort: 19333,
				Storage:  &blockv1alpha1.StorageSpec{Size: "5Gi"},
			},
			Volume: &blockv1alpha1.VolumeSpec{
				Replicas:        &replicas,
				Port:            8080,
				GRPCPort:        18080,
				BlockDir:        "/data1/block",
				BlockListenPort: 3260,
				Storage:         &blockv1alpha1.StorageSpec{Size: "50Gi"},
			},
			StorageClassName: "sw-block",
			CSINamespace:     "kube-system",
		},
	}
}
// TestMasterService_GoldenConformance checks the master headless Service:
// name, headlessness, not-ready address publishing, and the two ports.
func TestMasterService_GoldenConformance(t *testing.T) {
	cluster := fullStackCluster()
	svc := BuildMasterService(cluster)
	if svc.Name != "my-block-master" {
		t.Errorf("name = %q, want my-block-master", svc.Name)
	}
	if svc.Namespace != "default" {
		t.Errorf("namespace = %q, want default", svc.Namespace)
	}
	if svc.Spec.ClusterIP != "None" {
		t.Error("should be headless (ClusterIP=None)")
	}
	if !svc.Spec.PublishNotReadyAddresses {
		t.Error("publishNotReadyAddresses should be true")
	}
	if len(svc.Spec.Ports) != 2 {
		t.Fatalf("ports = %d, want 2", len(svc.Spec.Ports))
	}
	if svc.Spec.Ports[0].Port != 9333 {
		t.Errorf("http port = %d, want 9333", svc.Spec.Ports[0].Port)
	}
	if svc.Spec.Ports[1].Port != 19333 {
		t.Errorf("grpc port = %d, want 19333", svc.Spec.Ports[1].Port)
	}
}

// TestMasterStatefulSet_GoldenConformance checks the master StatefulSet:
// naming, replica count, weed command/args, readiness probe, and PVC template.
func TestMasterStatefulSet_GoldenConformance(t *testing.T) {
	cluster := fullStackCluster()
	sts := BuildMasterStatefulSet(cluster)
	if sts.Name != "my-block-master" {
		t.Errorf("name = %q, want my-block-master", sts.Name)
	}
	if sts.Spec.ServiceName != "my-block-master" {
		t.Errorf("serviceName = %q, want my-block-master", sts.Spec.ServiceName)
	}
	if sts.Spec.Replicas == nil || *sts.Spec.Replicas != 1 {
		t.Error("replicas should be 1")
	}
	// Container
	if len(sts.Spec.Template.Spec.Containers) != 1 {
		t.Fatalf("containers = %d, want 1", len(sts.Spec.Template.Spec.Containers))
	}
	c := sts.Spec.Template.Spec.Containers[0]
	if c.Name != "master" {
		t.Errorf("container name = %q", c.Name)
	}
	if c.Image != "chrislusf/seaweedfs:latest" {
		t.Errorf("image = %q", c.Image)
	}
	if len(c.Command) != 1 || c.Command[0] != "/usr/bin/weed" {
		t.Errorf("command = %v", c.Command)
	}
	// Key args
	if !containsArg(c.Args, "master") {
		t.Error("missing 'master' arg")
	}
	if !containsArg(c.Args, "-port=9333") {
		t.Error("missing -port arg")
	}
	if !containsArg(c.Args, "-mdir=/data") {
		t.Error("missing -mdir arg")
	}
	if !containsArg(c.Args, "-ip.bind=0.0.0.0") {
		t.Error("missing -ip.bind arg")
	}
	// Readiness probe
	if c.ReadinessProbe == nil {
		t.Fatal("missing readiness probe")
	}
	if c.ReadinessProbe.HTTPGet.Path != "/cluster/status" {
		t.Errorf("readiness path = %q, want /cluster/status", c.ReadinessProbe.HTTPGet.Path)
	}
	// VolumeClaimTemplates
	if len(sts.Spec.VolumeClaimTemplates) != 1 {
		t.Fatalf("VCTs = %d, want 1", len(sts.Spec.VolumeClaimTemplates))
	}
	if sts.Spec.VolumeClaimTemplates[0].Name != "data" {
		t.Error("VCT name should be 'data'")
	}
	// Volume mount
	dataMount := findVolumeMount(c.VolumeMounts, "data")
	if dataMount == nil || dataMount.MountPath != "/data" {
		t.Error("missing /data mount")
	}
}

// TestMasterStatefulSet_EmptyDir_NoStorage verifies that omitting Storage
// swaps the PVC template for an emptyDir data volume.
func TestMasterStatefulSet_EmptyDir_NoStorage(t *testing.T) {
	cluster := fullStackCluster()
	cluster.Spec.Master.Storage = nil
	sts := BuildMasterStatefulSet(cluster)
	if len(sts.Spec.VolumeClaimTemplates) != 0 {
		t.Error("should not have VCTs when no storage specified")
	}
	vol := findVolume(sts.Spec.Template.Spec.Volumes, "data")
	if vol == nil || vol.EmptyDir == nil {
		t.Error("should use emptyDir when no storage specified")
	}
}
// --- Full-Stack: Volume Builder Tests ---
// TestVolumeService_GoldenConformance checks the volume headless Service:
// name, headlessness, and the http/grpc/iscsi port triple.
func TestVolumeService_GoldenConformance(t *testing.T) {
	cluster := fullStackCluster()
	svc := BuildVolumeService(cluster)
	if svc.Name != "my-block-volume" {
		t.Errorf("name = %q, want my-block-volume", svc.Name)
	}
	if svc.Spec.ClusterIP != "None" {
		t.Error("should be headless")
	}
	if len(svc.Spec.Ports) != 3 {
		t.Fatalf("ports = %d, want 3 (http, grpc, iscsi)", len(svc.Spec.Ports))
	}
	if svc.Spec.Ports[0].Port != 8080 {
		t.Errorf("http port = %d", svc.Spec.Ports[0].Port)
	}
	if svc.Spec.Ports[1].Port != 18080 {
		t.Errorf("grpc port = %d", svc.Spec.Ports[1].Port)
	}
	if svc.Spec.Ports[2].Port != 3260 {
		t.Errorf("iscsi port = %d", svc.Spec.Ports[2].Port)
	}
}

// TestVolumeStatefulSet_GoldenConformance checks the volume StatefulSet:
// naming, weed volume args (including -block.* flags and the default
// downward-API portal), POD_IP env, ports, PVC template, and data mount.
func TestVolumeStatefulSet_GoldenConformance(t *testing.T) {
	cluster := fullStackCluster()
	masterAddr := "my-block-master.default:9333"
	sts := BuildVolumeStatefulSet(cluster, masterAddr)
	if sts.Name != "my-block-volume" {
		t.Errorf("name = %q, want my-block-volume", sts.Name)
	}
	if sts.Spec.ServiceName != "my-block-volume" {
		t.Errorf("serviceName = %q", sts.Spec.ServiceName)
	}
	c := sts.Spec.Template.Spec.Containers[0]
	if c.Name != "volume" {
		t.Errorf("container name = %q", c.Name)
	}
	if c.Image != "chrislusf/seaweedfs:latest" {
		t.Errorf("image = %q", c.Image)
	}
	// Key args
	if !containsArg(c.Args, "volume") {
		t.Error("missing 'volume' arg")
	}
	if !containsArg(c.Args, "-port=8080") {
		t.Error("missing -port arg")
	}
	if !containsArg(c.Args, "-dir=/data1") {
		t.Error("missing -dir arg")
	}
	if !containsArg(c.Args, "-master=my-block-master.default:9333") {
		t.Error("missing -master arg")
	}
	if !containsArg(c.Args, "-block.dir=/data1/block") {
		t.Error("missing -block.dir arg")
	}
	if !containsArg(c.Args, "-block.listen=0.0.0.0:3260") {
		t.Error("missing -block.listen arg")
	}
	// Default portal: $(POD_IP):3260,1
	if !containsArg(c.Args, "-block.portal=$(POD_IP):3260,1") {
		t.Error("missing default -block.portal arg")
	}
	// POD_IP env var
	if len(c.Env) < 1 || c.Env[0].Name != "POD_IP" {
		t.Error("missing POD_IP env var")
	}
	// Ports
	if len(c.Ports) != 3 {
		t.Fatalf("ports = %d, want 3", len(c.Ports))
	}
	// VolumeClaimTemplates
	if len(sts.Spec.VolumeClaimTemplates) != 1 {
		t.Fatalf("VCTs = %d, want 1", len(sts.Spec.VolumeClaimTemplates))
	}
	// Volume mount
	dataMount := findVolumeMount(c.VolumeMounts, "data")
	if dataMount == nil || dataMount.MountPath != "/data1" {
		t.Error("missing /data1 mount")
	}
}

// TestVolumeStatefulSet_PortalOverride verifies Spec.Volume.PortalOverride
// replaces the downward-API-derived default portal.
func TestVolumeStatefulSet_PortalOverride(t *testing.T) {
	cluster := fullStackCluster()
	cluster.Spec.Volume.PortalOverride = "192.168.1.100:3260,1"
	sts := BuildVolumeStatefulSet(cluster, "master:9333")
	c := sts.Spec.Template.Spec.Containers[0]
	if !containsArg(c.Args, "-block.portal=192.168.1.100:3260,1") {
		t.Error("portal override not applied")
	}
}

// TestVolumeStatefulSet_ExtraArgs verifies ExtraArgs are appended verbatim.
func TestVolumeStatefulSet_ExtraArgs(t *testing.T) {
	cluster := fullStackCluster()
	cluster.Spec.Volume.ExtraArgs = []string{"-compactionMBps=50"}
	sts := BuildVolumeStatefulSet(cluster, "master:9333")
	c := sts.Spec.Template.Spec.Containers[0]
	if !containsArg(c.Args, "-compactionMBps=50") {
		t.Error("extraArgs not passed through")
	}
}

// TestMasterStatefulSet_ExtraArgs verifies master ExtraArgs pass-through.
func TestMasterStatefulSet_ExtraArgs(t *testing.T) {
	cluster := fullStackCluster()
	cluster.Spec.Master.ExtraArgs = []string{"-defaultReplication=001"}
	sts := BuildMasterStatefulSet(cluster)
	c := sts.Spec.Template.Spec.Containers[0]
	if !containsArg(c.Args, "-defaultReplication=001") {
		t.Error("extraArgs not passed through")
	}
}

// --- M1 fix: Volume readiness probe ---
func TestVolumeStatefulSet_HasReadinessProbe(t *testing.T) {
	cluster := fullStackCluster()
	sts := BuildVolumeStatefulSet(cluster, "master:9333")
	c := sts.Spec.Template.Spec.Containers[0]
	if c.ReadinessProbe == nil {
		t.Fatal("volume container missing readiness probe")
	}
	if c.ReadinessProbe.HTTPGet == nil {
		t.Fatal("volume readiness probe should be HTTPGet")
	}
	if c.ReadinessProbe.HTTPGet.Path != "/status" {
		t.Errorf("readiness probe path = %q, want /status", c.ReadinessProbe.HTTPGet.Path)
	}
}

// --- M3 fix: PVC StorageClassName wired ---
func TestMasterStatefulSet_PVCStorageClassName(t *testing.T) {
	cluster := fullStackCluster()
	sc := "fast-ssd"
	cluster.Spec.Master.Storage.StorageClassName = &sc
	sts := BuildMasterStatefulSet(cluster)
	if len(sts.Spec.VolumeClaimTemplates) != 1 {
		t.Fatal("expected 1 VCT")
	}
	pvc := sts.Spec.VolumeClaimTemplates[0]
	if pvc.Spec.StorageClassName == nil || *pvc.Spec.StorageClassName != "fast-ssd" {
		t.Errorf("PVC storageClassName = %v, want fast-ssd", pvc.Spec.StorageClassName)
	}
}

// TestVolumeStatefulSet_PVCStorageClassName mirrors the M3 check for the
// volume server's PVC template.
func TestVolumeStatefulSet_PVCStorageClassName(t *testing.T) {
	cluster := fullStackCluster()
	sc := "slow-hdd"
	cluster.Spec.Volume.Storage.StorageClassName = &sc
	sts := BuildVolumeStatefulSet(cluster, "master:9333")
	if len(sts.Spec.VolumeClaimTemplates) != 1 {
		t.Fatal("expected 1 VCT")
	}
	pvc := sts.Spec.VolumeClaimTemplates[0]
	if pvc.Spec.StorageClassName == nil || *pvc.Spec.StorageClassName != "slow-hdd" {
		t.Errorf("PVC storageClassName = %v, want slow-hdd", pvc.Spec.StorageClassName)
	}
}

100
weed/storage/blockvol/operator/internal/resources/secret.go

@ -0,0 +1,100 @@
package resources
import (
"crypto/rand"
"encoding/hex"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// CHAPSecretName returns the auto-generated CHAP secret name.
// CHAPSecretName returns the auto-generated CHAP secret name.
func CHAPSecretName(cluster *blockv1alpha1.SeaweedBlockCluster) string {
	return cluster.Name + "-chap"
}
// BuildCHAPSecret constructs a CHAP authentication Secret with a random password.
// The secret is created once and not overwritten on subsequent reconcile loops.
// Rotation is triggered via the AnnotationRotateSecret annotation.
// BuildCHAPSecret constructs a CHAP authentication Secret with a random password.
// The secret is created once and not overwritten on subsequent reconcile loops.
// Rotation is triggered via the AnnotationRotateSecret annotation.
func BuildCHAPSecret(cluster *blockv1alpha1.SeaweedBlockCluster, csiNS string) (*corev1.Secret, error) {
	pw, err := generateRandomPassword(32)
	if err != nil {
		return nil, fmt.Errorf("generate CHAP password: %w", err)
	}
	out := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      CHAPSecretName(cluster),
			Namespace: csiNS,
			Labels:    ComponentLabels(cluster, "chap-secret"),
			Annotations: map[string]string{
				blockv1alpha1.AnnotationLastRotated: time.Now().UTC().Format(time.RFC3339),
			},
		},
		Type: corev1.SecretTypeOpaque,
		Data: map[string][]byte{
			"username": []byte("chap-user"),
			"password": []byte(pw),
		},
	}
	// Cross-namespace: use labels + finalizer, not ownerRef.
	SetCrossNamespaceOwnership(cluster, &out.ObjectMeta)
	return out, nil
}
// NeedsRotation returns true if the CR's rotate-secret annotation is newer
// than the secret's last-rotated annotation.
// NeedsRotation returns true if the CR's rotate-secret annotation is newer
// than the secret's last-rotated annotation.
//
// Reading a missing key from a (possibly nil) annotations map yields "", so
// the empty-string guards cover both "absent" and "explicitly empty".
func NeedsRotation(cluster *blockv1alpha1.SeaweedBlockCluster, existingSecret *corev1.Secret) bool {
	requested := cluster.Annotations[blockv1alpha1.AnnotationRotateSecret]
	if requested == "" {
		// No rotation has ever been requested for this CR.
		return false
	}
	last := existingSecret.Annotations[blockv1alpha1.AnnotationLastRotated]
	if last == "" {
		// A rotation was requested and the secret has never recorded one.
		return true
	}
	reqTime, err := time.Parse(time.RFC3339, requested)
	if err != nil {
		// Unparseable request timestamp: ignore the request.
		return false
	}
	lastTime, err := time.Parse(time.RFC3339, last)
	if err != nil {
		// BUG-QA-2 fix: malformed lastRotated must NOT trigger rotation every reconcile.
		// Treat unparseable timestamps as "rotation already happened" to prevent
		// infinite password churn that would break live iSCSI sessions.
		return false
	}
	return reqTime.After(lastTime)
}
// RegenerateCHAPPassword replaces the password in an existing secret and updates
// the last-rotated annotation.
// RegenerateCHAPPassword replaces the password in an existing secret and updates
// the last-rotated annotation.
//
// Fix: guard against a nil Data map. The function already guarded nil
// Annotations, but writing secret.Data["password"] on a Secret whose Data is
// nil (possible for an API-fetched or user-created secret) panics in Go.
func RegenerateCHAPPassword(secret *corev1.Secret) error {
	password, err := generateRandomPassword(32)
	if err != nil {
		return fmt.Errorf("generate CHAP password: %w", err)
	}
	if secret.Data == nil {
		secret.Data = make(map[string][]byte, 1)
	}
	secret.Data["password"] = []byte(password)
	if secret.Annotations == nil {
		secret.Annotations = make(map[string]string)
	}
	secret.Annotations[blockv1alpha1.AnnotationLastRotated] = time.Now().UTC().Format(time.RFC3339)
	return nil
}
func generateRandomPassword(length int) (string, error) {
b := make([]byte, length)
if _, err := rand.Read(b); err != nil {
return "", err
}
return hex.EncodeToString(b), nil
}

31
weed/storage/blockvol/operator/internal/resources/storageclass.go

@ -0,0 +1,31 @@
package resources
import (
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// BuildStorageClass constructs the StorageClass for block volumes.
// Reference: csi/deploy/storageclass.yaml
// BuildStorageClass constructs the StorageClass for block volumes.
// Reference: csi/deploy/storageclass.yaml
//
// The name comes from Spec.StorageClassName, falling back to the package
// default; binding is deferred to first consumer and volumes are deleted on
// claim release.
func BuildStorageClass(cluster *blockv1alpha1.SeaweedBlockCluster) *storagev1.StorageClass {
	name := blockv1alpha1.DefaultStorageClassName
	if cluster.Spec.StorageClassName != "" {
		name = cluster.Spec.StorageClassName
	}
	mode := storagev1.VolumeBindingWaitForFirstConsumer
	reclaim := corev1.PersistentVolumeReclaimDelete
	sc := &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: ClusterScopedLabels(cluster, "storageclass"),
		},
		Provisioner:       blockv1alpha1.CSIDriverName,
		VolumeBindingMode: &mode,
		ReclaimPolicy:     &reclaim,
	}
	return sc
}

204
weed/storage/blockvol/operator/internal/resources/volume.go

@ -0,0 +1,204 @@
package resources
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
)
// VolumeServiceName returns the headless Service name for volume servers.
// VolumeServiceName returns the headless Service name for volume servers.
func VolumeServiceName(cluster *blockv1alpha1.SeaweedBlockCluster) string {
	return cluster.Name + "-volume"
}
// BuildVolumeService constructs the headless Service for volume StatefulSet.
// BuildVolumeService constructs the headless Service for volume StatefulSet.
// It exposes the http, grpc, and iscsi ports; any port left at zero in the
// spec falls back to the package default.
func BuildVolumeService(cluster *blockv1alpha1.SeaweedBlockCluster) *corev1.Service {
	spec := cluster.Spec.Volume
	httpPort := spec.Port
	if httpPort == 0 {
		httpPort = blockv1alpha1.DefaultVolumePort
	}
	grpcPort := spec.GRPCPort
	if grpcPort == 0 {
		grpcPort = blockv1alpha1.DefaultVolumeGRPCPort
	}
	iscsiPort := spec.BlockListenPort
	if iscsiPort == 0 {
		iscsiPort = blockv1alpha1.DefaultBlockListenPort
	}
	svcName := VolumeServiceName(cluster)
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      svcName,
			Namespace: cluster.Namespace,
			Labels:    ComponentLabels(cluster, "volume"),
		},
		Spec: corev1.ServiceSpec{
			ClusterIP:                "None",
			Selector:                 SelectorLabels(cluster, "volume"),
			PublishNotReadyAddresses: true,
			Ports: []corev1.ServicePort{
				{Name: "http", Port: httpPort, TargetPort: intstr.FromInt32(httpPort)},
				{Name: "grpc", Port: grpcPort, TargetPort: intstr.FromInt32(grpcPort)},
				{Name: "iscsi", Port: iscsiPort, TargetPort: intstr.FromInt32(iscsiPort)},
			},
		},
	}
	SetOwnerReference(cluster, &svc.ObjectMeta)
	return svc
}
// BuildVolumeStatefulSet constructs the volume server StatefulSet.
// BuildVolumeStatefulSet constructs the volume server StatefulSet.
//
// masterAddr is the master address the volume server registers with
// (host:port). Optional spec fields (replicas, ports, block dir, image) fall
// back to the blockv1alpha1 defaults. Data storage is a PVC template when
// Spec.Volume.Storage.Size is set, otherwise an emptyDir volume.
func BuildVolumeStatefulSet(cluster *blockv1alpha1.SeaweedBlockCluster, masterAddr string) *appsv1.StatefulSet {
	vs := cluster.Spec.Volume
	// Resolve defaults for every optional field up front.
	replicas := int32(1)
	if vs.Replicas != nil {
		replicas = *vs.Replicas
	}
	port := vs.Port
	if port == 0 {
		port = blockv1alpha1.DefaultVolumePort
	}
	grpcPort := vs.GRPCPort
	if grpcPort == 0 {
		grpcPort = blockv1alpha1.DefaultVolumeGRPCPort
	}
	blockDir := vs.BlockDir
	if blockDir == "" {
		blockDir = blockv1alpha1.DefaultBlockDir
	}
	blockPort := vs.BlockListenPort
	if blockPort == 0 {
		blockPort = blockv1alpha1.DefaultBlockListenPort
	}
	name := VolumeServiceName(cluster)
	labels := ComponentLabels(cluster, "volume")
	selectorLabels := SelectorLabels(cluster, "volume")
	image := cluster.Spec.Image
	if image == "" {
		image = blockv1alpha1.DefaultImage
	}
	// weed volume command line; ExtraArgs are appended last (below).
	args := []string{
		"volume",
		fmt.Sprintf("-port=%d", port),
		"-dir=/data1",
		fmt.Sprintf("-master=%s", masterAddr),
		"-ip.bind=0.0.0.0",
		fmt.Sprintf("-block.dir=%s", blockDir),
		fmt.Sprintf("-block.listen=0.0.0.0:%d", blockPort),
	}
	// Portal override or auto-derive via downward API: $(POD_IP) is expanded
	// by kubelet from the POD_IP env var injected below.
	if vs.PortalOverride != "" {
		args = append(args, fmt.Sprintf("-block.portal=%s", vs.PortalOverride))
	} else {
		args = append(args, fmt.Sprintf("-block.portal=$(POD_IP):%d,1", blockPort))
	}
	args = append(args, vs.ExtraArgs...)
	sts := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: cluster.Namespace,
			Labels:    labels,
		},
		Spec: appsv1.StatefulSetSpec{
			ServiceName: name,
			Replicas:    &replicas,
			Selector:    &metav1.LabelSelector{MatchLabels: selectorLabels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:            "volume",
							Image:           image,
							ImagePullPolicy: blockv1alpha1.DefaultImagePullPolicy,
							Command:         []string{"/usr/bin/weed"},
							Args:            args,
							Env: []corev1.EnvVar{
								{
									// Pod IP via downward API, consumed by the
									// default -block.portal argument above.
									Name: "POD_IP",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											FieldPath: "status.podIP",
										},
									},
								},
							},
							Ports: []corev1.ContainerPort{
								{Name: "http", ContainerPort: port},
								{Name: "grpc", ContainerPort: grpcPort},
								{Name: "iscsi", ContainerPort: blockPort},
							},
							VolumeMounts: []corev1.VolumeMount{
								{Name: "data", MountPath: "/data1"},
							},
							// M1 fix: readiness probe for volume server
							ReadinessProbe: &corev1.Probe{
								ProbeHandler: corev1.ProbeHandler{
									HTTPGet: &corev1.HTTPGetAction{
										Path: "/status",
										Port: intstr.FromInt32(port),
									},
								},
								InitialDelaySeconds: 10,
								PeriodSeconds:       15,
							},
							Resources: vs.Resources,
						},
					},
				},
			},
		},
	}
	// VolumeClaimTemplate for data; emptyDir fallback when no storage is set.
	if vs.Storage != nil && vs.Storage.Size != "" {
		pvcSpec := corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse(vs.Storage.Size), // safe: validated by controller
				},
			},
		}
		// M3 fix: wire StorageClassName into PVC
		if vs.Storage.StorageClassName != nil {
			pvcSpec.StorageClassName = vs.Storage.StorageClassName
		}
		sts.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{
			{
				ObjectMeta: metav1.ObjectMeta{Name: "data"},
				Spec:       pvcSpec,
			},
		}
	} else {
		sts.Spec.Template.Spec.Volumes = []corev1.Volume{
			{
				Name: "data",
				VolumeSource: corev1.VolumeSource{
					EmptyDir: &corev1.EmptyDirVolumeSource{},
				},
			},
		}
	}
	SetOwnerReference(cluster, &sts.ObjectMeta)
	return sts
}

76
weed/storage/blockvol/operator/main.go

@ -0,0 +1,76 @@
package main
import (
"flag"
"os"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
blockv1alpha1 "github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/api/v1alpha1"
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol/operator/internal/controller"
)
// scheme holds every API type the manager's clients and caches can
// decode: the built-in Kubernetes types plus our v1alpha1 CRD types.
var scheme = runtime.NewScheme()

// init registers all required API groups into scheme before main runs.
// utilruntime.Must panics on a registration error, which is the right
// failure mode for unrecoverable startup misconfiguration.
func init() {
	for _, addTo := range []func(*runtime.Scheme) error{
		clientgoscheme.AddToScheme,
		blockv1alpha1.AddToScheme,
	} {
		utilruntime.Must(addTo(scheme))
	}
}
// main bootstraps the operator: parses flags, configures logging, builds
// the controller-runtime manager, wires in the SeaweedBlockCluster
// reconciler, registers health/ready probes, and blocks until shutdown.
// Every setup failure is fatal and exits with code 1.
func main() {
	var (
		metricsBindAddr string
		healthBindAddr  string
		leaderElect     bool
	)
	flag.StringVar(&metricsBindAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
	flag.StringVar(&healthBindAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
	flag.BoolVar(&leaderElect, "leader-elect", false,
		"Enable leader election for controller manager. Ensures only one active controller.")
	zapOpts := zap.Options{Development: true}
	zapOpts.BindFlags(flag.CommandLine)
	flag.Parse()

	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&zapOpts)))
	logger := ctrl.Log.WithName("setup")

	// Leader election (M2 fix per the commit message) ensures only one
	// replica reconciles when the operator is scaled for availability.
	manager, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:                 scheme,
		Metrics:                metricsserver.Options{BindAddress: metricsBindAddr},
		HealthProbeBindAddress: healthBindAddr,
		LeaderElection:         leaderElect,
		LeaderElectionID:       "sw-block-operator-lock",
	})
	if err != nil {
		logger.Error(err, "unable to start manager")
		os.Exit(1)
	}

	reconciler := &controller.Reconciler{
		Client: manager.GetClient(),
		Scheme: manager.GetScheme(),
	}
	if err := reconciler.SetupWithManager(manager); err != nil {
		logger.Error(err, "unable to create controller", "controller", "SeaweedBlockCluster")
		os.Exit(1)
	}

	if err := manager.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		logger.Error(err, "unable to set up health check")
		os.Exit(1)
	}
	if err := manager.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		logger.Error(err, "unable to set up ready check")
		os.Exit(1)
	}

	logger.Info("starting manager")
	// Start blocks until the signal handler's context is cancelled.
	if err := manager.Start(ctrl.SetupSignalHandler()); err != nil {
		logger.Error(err, "problem running manager")
		os.Exit(1)
	}
}
Loading…
Cancel
Save