Browse Source

Filer: Add code quality improvements for deletion retry

Address PR feedback with minor optimizations:
- Add MaxLoggedErrorDetails constant (replaces magic number 10)
- Pre-allocate slices and maps in processRetryBatch for efficiency
- Improve log message formatting to use constant

These changes improve code maintainability and runtime performance
without altering functionality.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
pull/7402/head
Dimonyga 2 months ago
parent
commit
f5baddb134
  1. 14
      weed/filer/filer_deletion.go

14
weed/filer/filer_deletion.go

@@ -28,6 +28,8 @@ const (
DeletionRetryPollInterval = 1 * time.Minute DeletionRetryPollInterval = 1 * time.Minute
// Maximum number of items to process per retry iteration // Maximum number of items to process per retry iteration
DeletionRetryBatchSize = 1000 DeletionRetryBatchSize = 1000
// Maximum number of error details to include in log messages
MaxLoggedErrorDetails = 10
) )
// DeletionRetryItem represents a file deletion that failed and needs to be retried // DeletionRetryItem represents a file deletion that failed and needs to be retried
@@ -299,13 +301,13 @@ func (f *Filer) processDeletionBatch(toDeleteFileIds []string, lookupFunc func([
// Retryable error - add to retry queue // Retryable error - add to retry queue
retryableErrorCount++ retryableErrorCount++
retryQueue.AddOrUpdate(result.FileId, result.Error) retryQueue.AddOrUpdate(result.FileId, result.Error)
if len(errorDetails) < 10 {
if len(errorDetails) < MaxLoggedErrorDetails {
errorDetails = append(errorDetails, result.FileId+": "+result.Error+" (will retry)") errorDetails = append(errorDetails, result.FileId+": "+result.Error+" (will retry)")
} }
} else { } else {
// Permanent error - log but don't retry // Permanent error - log but don't retry
permanentErrorCount++ permanentErrorCount++
if len(errorDetails) < 10 {
if len(errorDetails) < MaxLoggedErrorDetails {
errorDetails = append(errorDetails, result.FileId+": "+result.Error+" (permanent)") errorDetails = append(errorDetails, result.FileId+": "+result.Error+" (permanent)")
} }
} }
@@ -319,8 +321,8 @@ func (f *Filer) processDeletionBatch(toDeleteFileIds []string, lookupFunc func([
if totalErrors > 0 { if totalErrors > 0 {
logMessage := fmt.Sprintf("failed to delete %d/%d files (%d retryable, %d permanent)", logMessage := fmt.Sprintf("failed to delete %d/%d files (%d retryable, %d permanent)",
totalErrors, len(toDeleteFileIds), retryableErrorCount, permanentErrorCount) totalErrors, len(toDeleteFileIds), retryableErrorCount, permanentErrorCount)
if totalErrors > 10 {
logMessage += " (showing first 10)"
if totalErrors > MaxLoggedErrorDetails {
logMessage += fmt.Sprintf(" (showing first %d)", MaxLoggedErrorDetails)
} }
glog.V(0).Infof("%s: %v", logMessage, strings.Join(errorDetails, "; ")) glog.V(0).Infof("%s: %v", logMessage, strings.Join(errorDetails, "; "))
} }
@@ -410,8 +412,8 @@ func (f *Filer) loopProcessingDeletionRetry(lookupFunc func([]string) (map[strin
// re-queued with updated retry counts, and permanent errors are logged and discarded. // re-queued with updated retry counts, and permanent errors are logged and discarded.
func (f *Filer) processRetryBatch(readyItems []*DeletionRetryItem, lookupFunc func([]string) (map[string]*operation.LookupResult, error), retryQueue *DeletionRetryQueue) { func (f *Filer) processRetryBatch(readyItems []*DeletionRetryItem, lookupFunc func([]string) (map[string]*operation.LookupResult, error), retryQueue *DeletionRetryQueue) {
// Extract file IDs from retry items // Extract file IDs from retry items
var fileIds []string
itemsByFileId := make(map[string]*DeletionRetryItem)
fileIds := make([]string, 0, len(readyItems))
itemsByFileId := make(map[string]*DeletionRetryItem, len(readyItems))
for _, item := range readyItems { for _, item := range readyItems {
fileIds = append(fileIds, item.FileId) fileIds = append(fileIds, item.FileId)
itemsByFileId[item.FileId] = item itemsByFileId[item.FileId] = item

Loading…
Cancel
Save