
go fmt

pull/5150/merge
chrislu committed 1 month ago
commit b7ba6785a2
19 changed files:

  1. weed/admin/handlers/maintenance_handlers.go (1 changed line)
  2. weed/admin/maintenance/maintenance_integration.go (4 changed lines)
  3. weed/command/autocomplete.go (4 changed lines)
  4. weed/mq/broker/broker_grpc_sub.go (1 changed line)
  5. weed/mq/kafka/consumer_offset/memory_storage.go (1 changed line)
  6. weed/mq/kafka/consumer_offset/memory_storage_test.go (1 changed line)
  7. weed/mq/kafka/consumer_offset/storage.go (1 changed line)
  8. weed/mq/kafka/gateway/test_mock_handler.go (1 changed line)
  9. weed/mq/kafka/package.go (2 changed lines)
  10. weed/mq/kafka/partition_mapping.go (2 changed lines)
  11. weed/mq/kafka/protocol/describe_cluster.go (2 changed lines)
  12. weed/mq/kafka/protocol/flexible_versions.go (1 changed line)
  13. weed/mq/kafka/protocol/handler.go (5 changed lines)
  14. weed/mq/kafka/protocol/offset_storage_adapter.go (1 changed line)
  15. weed/mq/kafka/protocol/response_validation_example_test.go (1 changed line)
  16. weed/mq/metadata_constants.go (2 changed lines)
  17. weed/pb/mq_agent_pb/publish_response_test.go (2 changed lines)
  18. weed/pb/schema_pb/offset_test.go (2 changed lines)
  19. weed/util/log_buffer/log_buffer_flush_gap_test.go (2 changed lines)

weed/admin/handlers/maintenance_handlers.go (1 changed line)

@@ -46,7 +46,6 @@ func (h *MaintenanceHandlers) ShowTaskDetail(c *gin.Context) {
return
}
c.Header("Content-Type", "text/html")
taskDetailComponent := app.TaskDetail(taskDetail)
layoutComponent := layout.Layout(c, taskDetailComponent)

weed/admin/maintenance/maintenance_integration.go (4 changed lines)

@@ -306,25 +306,21 @@ func (s *MaintenanceIntegration) CanScheduleWithTaskSchedulers(task *Maintenance
return false // Fallback to existing logic for unknown types
}
// Convert task objects
taskObject := s.convertTaskToTaskSystem(task)
if taskObject == nil {
return false
}
runningTaskObjects := s.convertTasksToTaskSystem(runningTasks)
workerObjects := s.convertWorkersToTaskSystem(availableWorkers)
// Get the appropriate scheduler
scheduler := s.taskRegistry.GetScheduler(taskType)
if scheduler == nil {
return false
}
canSchedule := scheduler.CanScheduleNow(taskObject, runningTaskObjects, workerObjects)
return canSchedule
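
The hunk above only trims blank lines inside CanScheduleWithTaskSchedulers, whose flow is: convert the task and worker objects, look up a scheduler for the task type, and ask it whether the task can run now. A minimal sketch of that lookup-and-delegate pattern, using simplified stand-in types rather than the actual SeaweedFS maintenance types:

package main

import "fmt"

type Task struct{ Type string }

// Scheduler mirrors the CanScheduleNow call seen in the diff, with simplified
// parameter types assumed for illustration only.
type Scheduler interface {
	CanScheduleNow(task *Task, running []*Task, workers []string) bool
}

type alwaysScheduler struct{}

func (alwaysScheduler) CanScheduleNow(*Task, []*Task, []string) bool { return true }

type Registry struct{ schedulers map[string]Scheduler }

// CanSchedule returns false for unknown task types so callers can fall back
// to their existing scheduling logic, as the real method does.
func (r *Registry) CanSchedule(task *Task, running []*Task, workers []string) bool {
	s, ok := r.schedulers[task.Type]
	if !ok || s == nil {
		return false
	}
	return s.CanScheduleNow(task, running, workers)
}

func main() {
	r := &Registry{schedulers: map[string]Scheduler{"vacuum": alwaysScheduler{}}}
	fmt.Println(r.CanSchedule(&Task{Type: "vacuum"}, nil, nil))  // true
	fmt.Println(r.CanSchedule(&Task{Type: "unknown"}, nil, nil)) // false
}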

weed/command/autocomplete.go (4 changed lines)

@@ -2,11 +2,11 @@ package command
import (
"fmt"
"os"
"path/filepath"
"github.com/posener/complete"
completeinstall "github.com/posener/complete/cmd/install"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
"os"
"path/filepath"
"runtime"
)
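
Since this commit is a go fmt pass, the reshuffled import block above is expected: gofmt sorts import specs by path within each group (a run of imports not separated by blank lines), so where "os" and "path/filepath" end up depends on how the blank lines sat in the original block. A small, hypothetical illustration of a block gofmt leaves alone because the group is already sorted:

package main

import (
	// standard-library group, already sorted by import path
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// trivial use so the imports are not flagged as unused
	fmt.Println(filepath.Join(os.TempDir(), "example"))
}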

weed/mq/broker/broker_grpc_sub.go (1 changed line)

@@ -272,7 +272,6 @@ subscribeLoop:
TsNs: logEntry.TsNs,
}
if err := stream.Send(&mq_pb.SubscribeMessageResponse{Message: &mq_pb.SubscribeMessageResponse_Data{
Data: dataMsg,
}}); err != nil {

weed/mq/kafka/consumer_offset/memory_storage.go (1 changed line)

@@ -142,4 +142,3 @@ func (m *MemoryStorage) Close() error {
return nil
}

weed/mq/kafka/consumer_offset/memory_storage_test.go (1 changed line)

@@ -206,4 +206,3 @@ func TestMemoryStorageOverwrite(t *testing.T) {
assert.Equal(t, int64(20), offset)
assert.Equal(t, "meta2", metadata)
}

weed/mq/kafka/consumer_offset/storage.go (1 changed line)

@@ -56,4 +56,3 @@ var (
ErrInvalidPartition = fmt.Errorf("invalid partition")
ErrStorageClosed = fmt.Errorf("storage is closed")
)

weed/mq/kafka/gateway/test_mock_handler.go (1 changed line)

@@ -121,7 +121,6 @@ func (m *mockSeaweedMQHandler) ProduceRecord(ctx context.Context, topicName stri
offset := m.offsets[topicName][partitionID]
m.offsets[topicName][partitionID]++
// Store record
record := &mockRecord{
key: key,
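
For context, the mock handler above assigns offsets from a per-topic, per-partition counter before storing the record. A standalone sketch of that bookkeeping with hypothetical names, not the gateway's actual mockSeaweedMQHandler:

package main

import (
	"fmt"
	"sync"
)

// offsetBook hands out monotonically increasing offsets per (topic, partition),
// the same pattern ProduceRecord uses with m.offsets in the mock.
type offsetBook struct {
	mu      sync.Mutex
	offsets map[string]map[int32]int64
}

func (b *offsetBook) next(topic string, partition int32) int64 {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.offsets == nil {
		b.offsets = make(map[string]map[int32]int64)
	}
	if b.offsets[topic] == nil {
		b.offsets[topic] = make(map[int32]int64)
	}
	off := b.offsets[topic][partition]
	b.offsets[topic][partition]++
	return off
}

func main() {
	var b offsetBook
	fmt.Println(b.next("orders", 0)) // 0
	fmt.Println(b.next("orders", 0)) // 1
	fmt.Println(b.next("orders", 1)) // 0
}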

weed/mq/kafka/package.go (2 changed lines)

@@ -9,5 +9,3 @@ package kafka
// - offset/: Offset management
// - schema/: Schema registry integration
// - consumer/: Consumer group coordination

weed/mq/kafka/partition_mapping.go (2 changed lines)

@@ -51,5 +51,3 @@ func GetRangeSize() int32 {
func GetMaxKafkaPartitions() int32 {
return int32(pub_balancer.MaxPartitionCount) / 35 // 72 partitions
}
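
As an aside, the "// 72 partitions" comment implies pub_balancer.MaxPartitionCount is 2520 (72 × 35), presumably so each Kafka partition owns a fixed-size slice of SeaweedMQ's partition ring. A tiny sketch of that arithmetic with the constants hard-coded as assumptions:

package main

import "fmt"

const (
	maxPartitionCount = 2520 // assumed value of pub_balancer.MaxPartitionCount, inferred from the comment
	divisor           = 35   // divisor used by GetMaxKafkaPartitions
)

func main() {
	fmt.Println(maxPartitionCount / divisor) // 72 Kafka partitions
}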

weed/mq/kafka/protocol/describe_cluster.go (2 changed lines)

@@ -37,7 +37,6 @@ func (h *Handler) handleDescribeCluster(correlationID uint32, apiVersion uint16,
// Tagged fields at end of request
// (We don't parse them, just skip)
// Build response
response := make([]byte, 0, 256)
@@ -109,6 +108,5 @@ func (h *Handler) handleDescribeCluster(correlationID uint32, apiVersion uint16,
// Response-level tagged fields (flexible response)
response = append(response, 0x00) // Empty tagged fields
return response, nil
}
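
The trailing 0x00 byte kept at the end of this response is an empty tagged-field section: flexible Kafka responses (KIP-482) end each structure with an unsigned-varint count of tagged fields, and a count of zero encodes as a single zero byte. A minimal sketch, not the handler's actual helper:

package main

import "fmt"

// appendEmptyTaggedFields appends an unsigned-varint count of 0, i.e. one
// 0x00 byte, signalling "no tagged fields" in a flexible response.
func appendEmptyTaggedFields(b []byte) []byte {
	return append(b, 0x00)
}

func main() {
	response := make([]byte, 0, 256)
	// ... response fields would be appended here ...
	response = appendEmptyTaggedFields(response)
	fmt.Printf("% x\n", response) // 00
}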

weed/mq/kafka/protocol/flexible_versions.go (1 changed line)

@@ -268,7 +268,6 @@ func parseCompactString(data []byte) ([]byte, int) {
return nil, 0
}
if actualLength == 0 {
// Empty string (length was 1)
return []byte{}, consumed
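
parseCompactString handles the compact string encoding used by flexible protocol versions: an unsigned varint stores length+1, so a stored 0 means null and a stored 1 means the empty string handled in the branch above. A self-contained decoder sketch, independent of the handler's own helpers:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeCompactString reads an unsigned varint of (length + 1) followed by
// that many bytes. It returns the string, the bytes consumed, and ok=false
// for null, malformed, or truncated input.
func decodeCompactString(data []byte) (s string, consumed int, ok bool) {
	lenPlusOne, n := binary.Uvarint(data)
	if n <= 0 || lenPlusOne == 0 {
		return "", 0, false // malformed varint or null string
	}
	length := int(lenPlusOne - 1)
	if len(data) < n+length {
		return "", 0, false // truncated payload
	}
	return string(data[n : n+length]), n + length, true
}

func main() {
	// 0x04 encodes length 3 (+1), followed by "foo"
	s, used, ok := decodeCompactString([]byte{0x04, 'f', 'o', 'o'})
	fmt.Println(s, used, ok) // foo 4 true
}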

weed/mq/kafka/protocol/handler.go (5 changed lines)

@@ -881,7 +881,6 @@ func (h *Handler) HandleConn(ctx context.Context, conn net.Conn) error {
return fmt.Errorf("read message: %w", err)
}
// Parse at least the basic header to get API key and correlation ID
if len(messageBuf) < 8 {
return fmt.Errorf("message too short")
@@ -1050,7 +1049,6 @@ func (h *Handler) processRequestSync(req *kafkaRequest) ([]byte, error) {
requestStart := time.Now()
apiName := getAPIName(APIKey(req.apiKey))
// Only log high-volume requests at V(2), not V(4)
if glog.V(2) {
glog.V(2).Infof("[API] %s (key=%d, ver=%d, corr=%d)",
@@ -1828,7 +1826,6 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte,
// NOTE: Correlation ID is handled by writeResponseWithCorrelationID
// Do NOT include it in the response body
// ThrottleTimeMs (4 bytes) - v3+ addition
binary.Write(&buf, binary.BigEndian, int32(0)) // No throttling
@@ -1994,7 +1991,6 @@ func (h *Handler) handleListOffsets(correlationID uint32, apiVersion uint16, req
// Parse minimal request to understand what's being asked (header already stripped)
offset := 0
maxBytes := len(requestBody)
if maxBytes > 64 {
maxBytes = 64
@@ -3930,7 +3926,6 @@ func (h *Handler) handleInitProducerId(correlationID uint32, apiVersion uint16,
// v2+: transactional_id(NULLABLE_STRING) + transaction_timeout_ms(INT32) + producer_id(INT64) + producer_epoch(INT16)
// v4+: Uses flexible format with tagged fields
maxBytes := len(requestBody)
if maxBytes > 64 {
maxBytes = 64
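
The "message too short" check in the first hunk corresponds to the fixed 8-byte prefix every Kafka request carries after the 4-byte size frame: api_key (int16), api_version (int16), correlation_id (int32). A standalone parse of just that prefix, separate from the handler's own request handling:

package main

import (
	"encoding/binary"
	"fmt"
)

type requestHeader struct {
	APIKey        int16
	APIVersion    int16
	CorrelationID int32
}

// parseHeaderPrefix decodes the big-endian fixed prefix of a Kafka request.
func parseHeaderPrefix(msg []byte) (requestHeader, error) {
	if len(msg) < 8 {
		return requestHeader{}, fmt.Errorf("message too short: %d bytes", len(msg))
	}
	return requestHeader{
		APIKey:        int16(binary.BigEndian.Uint16(msg[0:2])),
		APIVersion:    int16(binary.BigEndian.Uint16(msg[2:4])),
		CorrelationID: int32(binary.BigEndian.Uint32(msg[4:8])),
	}, nil
}

func main() {
	// api_key=3 (Metadata), api_version=8, correlation_id=42
	hdr, err := parseHeaderPrefix([]byte{0x00, 0x03, 0x00, 0x08, 0x00, 0x00, 0x00, 0x2a})
	fmt.Printf("%+v %v\n", hdr, err) // {APIKey:3 APIVersion:8 CorrelationID:42} <nil>
}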

weed/mq/kafka/protocol/offset_storage_adapter.go (1 changed line)

@@ -47,4 +47,3 @@ func (a *offsetStorageAdapter) DeleteGroup(group string) error {
func (a *offsetStorageAdapter) Close() error {
return a.storage.Close()
}

weed/mq/kafka/protocol/response_validation_example_test.go (1 changed line)

@@ -140,4 +140,3 @@ func TestMetadataResponseHasBrokers(t *testing.T) {
t.Logf("✓ Metadata response correctly has %d broker(s)", parsedCount)
}

weed/mq/metadata_constants.go (2 changed lines)

@@ -17,5 +17,3 @@ const (
// Source file tracking for parquet deduplication
ExtendedAttrSources = "sources" // JSON-encoded list of source log files
)

weed/pb/mq_agent_pb/publish_response_test.go (2 changed lines)

@@ -1,8 +1,8 @@
package mq_agent_pb
import (
"testing"
"google.golang.org/protobuf/proto"
"testing"
)
func TestPublishRecordResponseSerialization(t *testing.T) {

weed/pb/schema_pb/offset_test.go (2 changed lines)

@@ -1,8 +1,8 @@
package schema_pb
import (
"testing"
"google.golang.org/protobuf/proto"
"testing"
)
func TestOffsetTypeEnums(t *testing.T) {

weed/util/log_buffer/log_buffer_flush_gap_test.go (2 changed lines)

@@ -15,6 +15,7 @@ import (
// are lost in the gap between flushed disk data and in-memory buffer.
//
// OBSERVED BEHAVIOR FROM LOGS:
//
// Request offset: 1764
// Disk contains: 1000-1763 (764 messages)
// Memory buffer starts at: 1800
@@ -677,4 +678,3 @@ func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) {
}
flushMu.Unlock()
}
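
The scenario spelled out in the extended comment (disk holds 1000-1763, memory starts at 1800, request at 1764) leaves offsets 1764-1799 unreachable on either path. A minimal check for that condition, using hypothetical names rather than the log_buffer package's actual fields:

package main

import "fmt"

// hasFlushGap reports whether a requested offset falls strictly between the
// last offset flushed to disk and the first offset still held in memory,
// which is the window the test above exercises.
func hasFlushGap(requested, lastFlushedOffset, memoryStartOffset int64) bool {
	return requested > lastFlushedOffset && requested < memoryStartOffset
}

func main() {
	// numbers taken from the test's comment
	fmt.Println(hasFlushGap(1764, 1763, 1800)) // true: 1764-1799 fall in the gap
	fmt.Println(hasFlushGap(1800, 1763, 1800)) // false: served from the in-memory buffer
}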