diff --git a/docker/admin_integration/Makefile b/docker/admin_integration/Makefile
index cefbb59dc..9392182c6 100644
--- a/docker/admin_integration/Makefile
+++ b/docker/admin_integration/Makefile
@@ -1,7 +1,7 @@
 # SeaweedFS Admin Integration Test Makefile
 # Tests the admin server and worker functionality using official weed commands
-.PHONY: help build build-and-restart start stop restart logs clean status test admin-ui worker-logs master-logs admin-logs
+.PHONY: help build build-and-restart restart-workers start stop restart logs clean status test admin-ui worker-logs master-logs admin-logs
 
 .DEFAULT_GOAL := help
 
 COMPOSE_FILE := docker-compose-ec-test.yml
@@ -19,10 +19,20 @@ build: ## Build SeaweedFS with latest changes and create Docker image
 	@echo "💡 Run 'make restart' to apply changes to running services"
 
 build-and-restart: build ## Build with latest changes and restart services
-	@echo "🔄 Restarting services to apply changes..."
-	@docker-compose -f $(COMPOSE_FILE) restart admin
-	@echo "✅ Services restarted with latest changes!"
+	@echo "🔄 Recreating services with new image..."
+	@echo "1️⃣ Recreating admin server with new image..."
+	@docker-compose -f $(COMPOSE_FILE) up -d admin
+	@sleep 5
+	@echo "2️⃣ Recreating workers to reconnect..."
+	@docker-compose -f $(COMPOSE_FILE) up -d worker1 worker2 worker3
+	@echo "✅ All services recreated with latest changes!"
 	@echo "🌐 Admin UI: http://localhost:23646/"
+	@echo "💡 Workers will reconnect to the new admin server"
+
+restart-workers: ## Restart all workers to reconnect to admin server
+	@echo "🔄 Restarting workers to reconnect to admin server..."
+	@docker-compose -f $(COMPOSE_FILE) restart worker1 worker2 worker3
+	@echo "✅ Workers restarted and will reconnect to admin server"
 
 help: ## Show this help message
 	@echo "SeaweedFS Admin Integration Test"
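A note on the Makefile change: `docker-compose restart` stops and starts the existing container, so it keeps running the old image even after a rebuild; `docker-compose up -d` recreates the container whenever its image or configuration has changed, which is what `build-and-restart` actually needs. The fixed `@sleep 5` is a settling delay rather than a readiness check, so on a slow host the workers can still come up before the admin server accepts connections; a health-check-based wait would be more robust if this proves flaky.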
diff --git a/weed/admin/handlers/maintenance_handlers.go b/weed/admin/handlers/maintenance_handlers.go
index 91629e150..cf090d5ad 100644
--- a/weed/admin/handlers/maintenance_handlers.go
+++ b/weed/admin/handlers/maintenance_handlers.go
@@ -10,6 +10,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/admin/view/app"
 	"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
 	"github.com/seaweedfs/seaweedfs/weed/admin/view/layout"
+	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
 	"github.com/seaweedfs/seaweedfs/weed/worker/types"
 )
@@ -30,19 +31,31 @@ func NewMaintenanceHandlers(adminServer *dash.AdminServer) *MaintenanceHandlers
 func (h *MaintenanceHandlers) ShowMaintenanceQueue(c *gin.Context) {
 	data, err := h.getMaintenanceQueueData()
 	if err != nil {
+		glog.Infof("DEBUG ShowMaintenanceQueue: error getting data: %v", err)
 		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
 		return
 	}
 
+	glog.Infof("DEBUG ShowMaintenanceQueue: got data with %d tasks", len(data.Tasks))
+	if data.Stats != nil {
+		glog.Infof("DEBUG ShowMaintenanceQueue: stats = {pending: %d, running: %d, completed: %d}",
+			data.Stats.PendingTasks, data.Stats.RunningTasks, data.Stats.CompletedToday)
+	} else {
+		glog.Infof("DEBUG ShowMaintenanceQueue: stats is nil")
+	}
+
 	// Render HTML template
 	c.Header("Content-Type", "text/html")
 	maintenanceComponent := app.MaintenanceQueue(data)
 	layoutComponent := layout.Layout(c, maintenanceComponent)
 	err = layoutComponent.Render(c.Request.Context(), c.Writer)
 	if err != nil {
+		glog.Infof("DEBUG ShowMaintenanceQueue: render error: %v", err)
 		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
 		return
 	}
+
+	glog.Infof("DEBUG ShowMaintenanceQueue: template rendered successfully")
 }
 
 // ShowMaintenanceWorkers displays the maintenance workers page
@@ -287,27 +300,42 @@ func (h *MaintenanceHandlers) UpdateMaintenanceConfig(c *gin.Context) {
 
 // Helper methods that delegate to AdminServer
 func (h *MaintenanceHandlers) getMaintenanceQueueData() (*maintenance.MaintenanceQueueData, error) {
+	glog.Infof("DEBUG getMaintenanceQueueData: starting data assembly")
+
 	tasks, err := h.getMaintenanceTasks()
 	if err != nil {
+		glog.Infof("DEBUG getMaintenanceQueueData: error getting tasks: %v", err)
 		return nil, err
 	}
+	glog.Infof("DEBUG getMaintenanceQueueData: got %d tasks", len(tasks))
 
 	workers, err := h.getMaintenanceWorkers()
 	if err != nil {
+		glog.Infof("DEBUG getMaintenanceQueueData: error getting workers: %v", err)
 		return nil, err
 	}
+	glog.Infof("DEBUG getMaintenanceQueueData: got %d workers", len(workers))
 
 	stats, err := h.getMaintenanceQueueStats()
 	if err != nil {
+		glog.Infof("DEBUG getMaintenanceQueueData: error getting stats: %v", err)
 		return nil, err
 	}
+	if stats != nil {
+		glog.Infof("DEBUG getMaintenanceQueueData: got stats {pending: %d, running: %d}", stats.PendingTasks, stats.RunningTasks)
+	} else {
+		glog.Infof("DEBUG getMaintenanceQueueData: stats is nil")
+	}
 
-	return &maintenance.MaintenanceQueueData{
+	data := &maintenance.MaintenanceQueueData{
 		Tasks:       tasks,
 		Workers:     workers,
 		Stats:       stats,
 		LastUpdated: time.Now(),
-	}, nil
+	}
+
+	glog.Infof("DEBUG getMaintenanceQueueData: assembled data with %d tasks, %d workers", len(data.Tasks), len(data.Workers))
+	return data, nil
 }
 
 func (h *MaintenanceHandlers) getMaintenanceQueueStats() (*maintenance.QueueStats, error) {
@@ -316,14 +344,31 @@ func (h *MaintenanceHandlers) getMaintenanceQueueStats() (*maintenance.QueueStats, error) {
 }
 
 func (h *MaintenanceHandlers) getMaintenanceTasks() ([]*maintenance.MaintenanceTask, error) {
-	// Call the private method logic directly since the public GetMaintenanceTasks is for HTTP handlers
+	// Call the maintenance manager directly to get all tasks
 	if h.adminServer == nil {
+		glog.Infof("DEBUG getMaintenanceTasks: adminServer is nil")
 		return []*maintenance.MaintenanceTask{}, nil
 	}
 
-	// We need to access the maintenance manager through reflection or add a proper accessor
-	// For now, return empty tasks until proper accessor is added
-	return []*maintenance.MaintenanceTask{}, nil
+	if h.adminServer.GetMaintenanceManager() == nil {
+		glog.Infof("DEBUG getMaintenanceTasks: maintenance manager is nil")
+		return []*maintenance.MaintenanceTask{}, nil
+	}
+
+	// Get ALL tasks using empty parameters - this should match what the API returns
+	allTasks := h.adminServer.GetMaintenanceManager().GetTasks("", "", 0)
+	glog.Infof("DEBUG getMaintenanceTasks: retrieved %d tasks from maintenance manager", len(allTasks))
+
+	for i, task := range allTasks {
+		if task != nil {
+			glog.Infof("DEBUG getMaintenanceTasks: task[%d] = {id: %s, type: %s, status: %s, volume: %d}",
+				i, task.ID, task.Type, task.Status, task.VolumeID)
+		} else {
+			glog.Infof("DEBUG getMaintenanceTasks: task[%d] is nil", i)
+		}
+	}
+
+	return allTasks, nil
 }
 
 func (h *MaintenanceHandlers) getMaintenanceWorkers() ([]*maintenance.MaintenanceWorker, error) {
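The rewritten `getMaintenanceTasks` assumes `dash.AdminServer` now exposes a public `GetMaintenanceManager()` accessor; the old code returned an empty slice precisely because no accessor existed. The accessor itself is not part of this diff. A minimal sketch of what it presumably looks like, with the `maintenanceManager` field name assumed for illustration:

```go
// Hypothetical sketch - the accessor this diff calls but does not define.
// The maintenanceManager field name is an assumption, not from the diff.
func (s *AdminServer) GetMaintenanceManager() *maintenance.MaintenanceManager {
	return s.maintenanceManager
}
```

Separately, the `DEBUG` lines use `glog.Infof`, which logs unconditionally; wrapping them in `glog.V(1).Infof` (or stripping them before merge) would keep routine page loads from flooding the log.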
diff --git a/weed/admin/maintenance/maintenance_queue.go b/weed/admin/maintenance/maintenance_queue.go
index 580a98718..155c2f017 100644
--- a/weed/admin/maintenance/maintenance_queue.go
+++ b/weed/admin/maintenance/maintenance_queue.go
@@ -1,6 +1,8 @@
 package maintenance
 
 import (
+	"crypto/rand"
+	"fmt"
 	"sort"
 	"time"
@@ -24,11 +26,17 @@ func (mq *MaintenanceQueue) SetIntegration(integration *MaintenanceIntegration)
 	glog.V(1).Infof("Maintenance queue configured with integration")
 }
 
-// AddTask adds a new maintenance task to the queue
+// AddTask adds a new maintenance task to the queue with deduplication
 func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) {
 	mq.mutex.Lock()
 	defer mq.mutex.Unlock()
 
+	// Check for duplicate tasks (same type + volume + not completed)
+	if mq.hasDuplicateTask(task) {
+		glog.V(2).Infof("Skipping duplicate task: %s for volume %d (already exists)", task.Type, task.VolumeID)
+		return
+	}
+
 	task.ID = generateTaskID()
 	task.Status = TaskStatusPending
 	task.CreatedAt = time.Now()
@@ -48,6 +56,21 @@ func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) {
 	glog.V(2).Infof("Added maintenance task %s: %s for volume %d", task.ID, task.Type, task.VolumeID)
 }
 
+// hasDuplicateTask checks if a similar task already exists (same type, volume, and not completed)
+func (mq *MaintenanceQueue) hasDuplicateTask(newTask *MaintenanceTask) bool {
+	for _, existingTask := range mq.tasks {
+		if existingTask.Type == newTask.Type &&
+			existingTask.VolumeID == newTask.VolumeID &&
+			existingTask.Server == newTask.Server &&
+			(existingTask.Status == TaskStatusPending ||
+				existingTask.Status == TaskStatusAssigned ||
+				existingTask.Status == TaskStatusInProgress) {
+			return true
+		}
+	}
+	return false
+}
+
 // AddTasksFromResults converts detection results to tasks and adds them to the queue
 func (mq *MaintenanceQueue) AddTasksFromResults(results []*TaskDetectionResult) {
 	for _, result := range results {
@@ -311,10 +334,23 @@ func (mq *MaintenanceQueue) GetWorkers() []*MaintenanceWorker {
 func generateTaskID() string {
 	const charset = "abcdefghijklmnopqrstuvwxyz0123456789"
 	b := make([]byte, 8)
+	randBytes := make([]byte, 8)
+
+	// Generate random bytes
+	if _, err := rand.Read(randBytes); err != nil {
+		// Fallback to timestamp-based ID if crypto/rand fails
+		timestamp := time.Now().UnixNano()
+		return fmt.Sprintf("task-%d", timestamp)
+	}
+
+	// Convert random bytes to charset
 	for i := range b {
-		b[i] = charset[i%len(charset)]
+		b[i] = charset[int(randBytes[i])%len(charset)]
 	}
-	return string(b)
+
+	// Add timestamp suffix to ensure uniqueness
+	timestamp := time.Now().Unix() % 10000 // last 4 digits of timestamp
+	return fmt.Sprintf("%s-%04d", string(b), timestamp)
 }
 
 // CleanupOldTasks removes old completed and failed tasks
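Pulled out as a runnable sketch to make the ID change concrete: the old loop indexed the charset with `i % len(charset)`, so every task received the literal ID `abcdefgh`, which is why collisions were guaranteed. The new scheme is 8 random charset characters plus the last four decimal digits of the Unix timestamp:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"time"
)

const charset = "abcdefghijklmnopqrstuvwxyz0123456789"

// Mirrors the patched generateTaskID above.
func generateTaskID() string {
	b := make([]byte, 8)
	randBytes := make([]byte, 8)
	if _, err := rand.Read(randBytes); err != nil {
		// Same fallback as the patch: timestamp-only ID.
		return fmt.Sprintf("task-%d", time.Now().UnixNano())
	}
	for i := range b {
		// %36 over byte values 0-255 has a slight modulo bias; harmless for display IDs.
		b[i] = charset[int(randBytes[i])%len(charset)]
	}
	return fmt.Sprintf("%s-%04d", string(b), time.Now().Unix()%10000)
}

func main() {
	fmt.Println(generateTaskID()) // e.g. "k3x9qa2p-4817" - 13 characters
}
```

Note that `hasDuplicateTask` is only called from `AddTask` while the queue mutex is held, so the linear scan is race-free; O(n) per insert is fine at maintenance-queue scale.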
diff --git a/weed/admin/view/app/maintenance_queue.templ b/weed/admin/view/app/maintenance_queue.templ
index e4b9b6b92..c4e01b963 100644
--- a/weed/admin/view/app/maintenance_queue.templ
+++ b/weed/admin/view/app/maintenance_queue.templ
@@ -103,9 +103,9 @@ templ MaintenanceQueue(data *maintenance.MaintenanceQueueData) {
                 for _, task := range data.Tasks {
-                    if task.Status == maintenance.TaskStatusPending {
+                    if string(task.Status) == "pending" {
-                            {task.ID[:8]}...
+                            {task.ID}
-                            {task.ID[:8]}...
+                            {task.ID}
-                            {task.ID[:8]}...
+                            {task.ID}
-                            {task.ID[:8]}...
+                            {task.ID}
diff --git a/weed/admin/view/app/maintenance_queue_templ.go b/weed/admin/view/app/maintenance_queue_templ.go
--- a/weed/admin/view/app/maintenance_queue_templ.go
+++ b/weed/admin/view/app/maintenance_queue_templ.go
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
- templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID[:8])
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 108, Col: 74}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 108, Col: 70}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "...")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 string
- templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID[:8])
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 165, Col: 74}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 165, Col: 70}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "...")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
- templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID[:8])
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 235, Col: 78}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 235, Col: 74}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "...
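A closing observation on the template change: under the old generator every ID rendered as `abcdefgh` anyway, and with the new `xxxxxxxx-NNNN` format the truncated `{task.ID[:8]}...` display would cut off exactly the timestamp suffix that disambiguates tasks, so rendering the full ID is the right call. A tiny illustration (the ID value is made up):

```go
package main

import "fmt"

func main() {
	// Hypothetical ID in the new "xxxxxxxx-NNNN" format.
	id := "k3x9qa2p-4817"
	fmt.Println(id[:8]) // "k3x9qa2p" - the "-4817" suffix is lost
	fmt.Println(id)     // full ID, as the template now renders it
}
```

The status check, on the other hand, trades a typed constant for a bare string literal; if the goal was to sidestep a type mismatch, `string(task.Status) == string(maintenance.TaskStatusPending)` would keep the comparison tied to the constant rather than to the literal "pending".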