go fmt

pull/5150/merge
chrislu, 1 month ago
commit b7ba6785a2
27 changed files (changed lines per file):

   1  weed/admin/handlers/maintenance_handlers.go
   4  weed/admin/maintenance/maintenance_integration.go
  28  weed/command/autocomplete.go
   1  weed/mq/broker/broker_grpc_sub.go
  48  weed/mq/kafka/consumer/rebalance_timeout.go
   2  weed/mq/kafka/consumer/rebalance_timeout_test.go
   1  weed/mq/kafka/consumer_offset/memory_storage.go
   1  weed/mq/kafka/consumer_offset/memory_storage_test.go
   1  weed/mq/kafka/consumer_offset/storage.go
   1  weed/mq/kafka/gateway/test_mock_handler.go
   2  weed/mq/kafka/package.go
   2  weed/mq/kafka/partition_mapping.go
   2  weed/mq/kafka/protocol/describe_cluster.go
   1  weed/mq/kafka/protocol/flexible_versions.go
  18  weed/mq/kafka/protocol/group_introspection.go
  23  weed/mq/kafka/protocol/handler.go
   1  weed/mq/kafka/protocol/offset_storage_adapter.go
   1  weed/mq/kafka/protocol/response_validation_example_test.go
  52  weed/mq/kafka/schema/envelope_test.go
   2  weed/mq/metadata_constants.go
  10  weed/mq/offset/migration.go
   8  weed/mq/schema/flat_schema_utils_test.go
   2  weed/pb/mq_agent_pb/publish_response_test.go
   6  weed/pb/schema_pb/offset_test.go
  18  weed/remote_storage/azure/azure_storage_client_test.go
  10  weed/util/log_buffer/log_buffer_flush_gap_test.go
   2  weed/worker/client.go
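
Every hunk below is mechanical output from Go's standard formatter: struct keys and end-of-line comments padded into columns, imports regrouped, switch bodies re-indented, and stray blank lines dropped. As a reader's aid, here is a minimal before/after sketch of the dominant change, gofmt's column alignment (an illustrative snippet only; the type is a trimmed copy of the one in rebalance_timeout.go below, not its full definition):

package example

import "time"

// RebalanceStatus is trimmed to three fields for illustration.
type RebalanceStatus struct {
	GroupID           string
	Generation        int32
	RebalanceDuration time.Duration
}

// Hand-written literals often use a single space after each key:
//
//	RebalanceStatus{
//		GroupID: groupID,
//		Generation: generation,
//		RebalanceDuration: duration,
//	}
//
// gofmt pads every value in a contiguous run of key/value lines to a shared
// column, which is exactly the churn that dominates the hunks below:
func newStatus(groupID string, generation int32, duration time.Duration) RebalanceStatus {
	return RebalanceStatus{
		GroupID:           groupID,
		Generation:        generation,
		RebalanceDuration: duration,
	}
}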

weed/admin/handlers/maintenance_handlers.go (1 changed line)

@@ -46,7 +46,6 @@ func (h *MaintenanceHandlers) ShowTaskDetail(c *gin.Context) {
 		return
 	}
 	c.Header("Content-Type", "text/html")
-
 	taskDetailComponent := app.TaskDetail(taskDetail)
 	layoutComponent := layout.Layout(c, taskDetailComponent)

weed/admin/maintenance/maintenance_integration.go (4 changed lines)

@@ -306,25 +306,21 @@ func (s *MaintenanceIntegration) CanScheduleWithTaskSchedulers(task *Maintenance
 		return false // Fallback to existing logic for unknown types
 	}
-
 	// Convert task objects
 	taskObject := s.convertTaskToTaskSystem(task)
 	if taskObject == nil {
 		return false
 	}
-
 	runningTaskObjects := s.convertTasksToTaskSystem(runningTasks)
 	workerObjects := s.convertWorkersToTaskSystem(availableWorkers)
-
 	// Get the appropriate scheduler
 	scheduler := s.taskRegistry.GetScheduler(taskType)
 	if scheduler == nil {
 		return false
 	}
-
 	canSchedule := scheduler.CanScheduleNow(taskObject, runningTaskObjects, workerObjects)
 	return canSchedule

weed/command/autocomplete.go (28 changed lines)

@@ -2,11 +2,11 @@ package command

 import (
 	"fmt"
+	"os"
+	"path/filepath"
 	"github.com/posener/complete"
 	completeinstall "github.com/posener/complete/cmd/install"
 	flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
-	"os"
-	"path/filepath"
 	"runtime"
 )

@@ -53,14 +53,14 @@ func printAutocompleteScript(shell string) bool {
 		return false
 	}
-	switch shell {
-	case "bash":
-		fmt.Printf("complete -C %q weed\n", binPath)
-	case "zsh":
-		fmt.Printf("autoload -U +X bashcompinit && bashcompinit\n")
-		fmt.Printf("complete -o nospace -C %q weed\n", binPath)
-	case "fish":
-		fmt.Printf(`function __complete_weed
+	switch shell {
+	case "bash":
+		fmt.Printf("complete -C %q weed\n", binPath)
+	case "zsh":
+		fmt.Printf("autoload -U +X bashcompinit && bashcompinit\n")
+		fmt.Printf("complete -o nospace -C %q weed\n", binPath)
+	case "fish":
+		fmt.Printf(`function __complete_weed
 set -lx COMP_LINE (commandline -cp)
 test -z (commandline -ct)
 and set COMP_LINE "$COMP_LINE "

@@ -68,10 +68,10 @@ func printAutocompleteScript(shell string) bool {
 end
 complete -f -c weed -a "(__complete_weed)"
 `, binPath)
-	default:
-		fmt.Fprintf(os.Stderr, "unsupported shell: %s. Supported shells: bash, zsh, fish\n", shell)
-		return false
-	}
+	default:
+		fmt.Fprintf(os.Stderr, "unsupported shell: %s. Supported shells: bash, zsh, fish\n", shell)
+		return false
+	}
 	return true
 }

weed/mq/broker/broker_grpc_sub.go (1 changed line)

@@ -272,7 +272,6 @@ subscribeLoop:
 				TsNs: logEntry.TsNs,
 			}
-
 			if err := stream.Send(&mq_pb.SubscribeMessageResponse{Message: &mq_pb.SubscribeMessageResponse_Data{
 				Data: dataMsg,
 			}}); err != nil {

weed/mq/kafka/consumer/rebalance_timeout.go (48 changed lines)

@@ -150,13 +150,13 @@ func (rtm *RebalanceTimeoutManager) GetRebalanceStatus(groupID string) *Rebalanc
 	defer group.Mu.RUnlock()

 	status := &RebalanceStatus{
-		GroupID:       groupID,
-		State:         group.State,
-		Generation:    group.Generation,
-		MemberCount:   len(group.Members),
-		Leader:        group.Leader,
-		LastActivity:  group.LastActivity,
-		IsRebalancing: group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance,
+		GroupID:           groupID,
+		State:             group.State,
+		Generation:        group.Generation,
+		MemberCount:       len(group.Members),
+		Leader:            group.Leader,
+		LastActivity:      group.LastActivity,
+		IsRebalancing:     group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance,
 		RebalanceDuration: time.Since(group.LastActivity),
 	}

@@ -194,25 +194,25 @@ func (rtm *RebalanceTimeoutManager) GetRebalanceStatus(groupID string) *Rebalanc
 // RebalanceStatus represents the current status of a group's rebalance
 type RebalanceStatus struct {
-	GroupID string `json:"group_id"`
-	State GroupState `json:"state"`
-	Generation int32 `json:"generation"`
-	MemberCount int `json:"member_count"`
-	Leader string `json:"leader"`
-	LastActivity time.Time `json:"last_activity"`
-	IsRebalancing bool `json:"is_rebalancing"`
-	RebalanceDuration time.Duration `json:"rebalance_duration"`
-	Members []MemberTimeoutStatus `json:"members"`
+	GroupID           string                `json:"group_id"`
+	State             GroupState            `json:"state"`
+	Generation        int32                 `json:"generation"`
+	MemberCount       int                   `json:"member_count"`
+	Leader            string                `json:"leader"`
+	LastActivity      time.Time             `json:"last_activity"`
+	IsRebalancing     bool                  `json:"is_rebalancing"`
+	RebalanceDuration time.Duration         `json:"rebalance_duration"`
+	Members           []MemberTimeoutStatus `json:"members"`
 }

 // MemberTimeoutStatus represents timeout status for a group member
 type MemberTimeoutStatus struct {
-	MemberID string `json:"member_id"`
-	State MemberState `json:"state"`
-	LastHeartbeat time.Time `json:"last_heartbeat"`
-	JoinedAt time.Time `json:"joined_at"`
-	SessionTimeout time.Duration `json:"session_timeout"`
-	RebalanceTimeout time.Duration `json:"rebalance_timeout"`
-	SessionTimeRemaining time.Duration `json:"session_time_remaining"`
-	RebalanceTimeRemaining time.Duration `json:"rebalance_time_remaining"`
+	MemberID               string        `json:"member_id"`
+	State                  MemberState   `json:"state"`
+	LastHeartbeat          time.Time     `json:"last_heartbeat"`
+	JoinedAt               time.Time     `json:"joined_at"`
+	SessionTimeout         time.Duration `json:"session_timeout"`
+	RebalanceTimeout       time.Duration `json:"rebalance_timeout"`
+	SessionTimeRemaining   time.Duration `json:"session_time_remaining"`
+	RebalanceTimeRemaining time.Duration `json:"rebalance_time_remaining"`
 }
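
A detail worth knowing when reviewing hunks like the two above: gofmt only aligns contiguous runs of fields, so editing the widest member of a run rewrites every other line in it, which is how a one-field edit becomes a many-line hunk. A blank line starts a new, independently padded run. A small sketch of that rule (standard gofmt behavior, hypothetical type):

package example

// Both groups below are already gofmt-clean. The blank line splits the
// struct into two alignment runs, so the long name in the first run does
// not widen the columns of the second.
type widths struct {
	aVeryLongFieldName int64 // run 1: padded to the longest name in run 1
	b                  int64

	c  int32 // run 2: padded independently
	dd int32
}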

weed/mq/kafka/consumer/rebalance_timeout_test.go (2 changed lines)

@@ -56,7 +56,7 @@ func TestRebalanceTimeoutManager_SessionTimeoutFallback(t *testing.T) {
 	member := &GroupMember{
 		ID:               "member1",
 		ClientID:         "client1",
-		SessionTimeout:   1000, // 1 second
+		SessionTimeout:   1000,  // 1 second
 		RebalanceTimeout: 30000, // 30 seconds
 		State:            MemberStatePending,
 		LastHeartbeat:    time.Now().Add(-2 * time.Second), // Last heartbeat 2 seconds ago

weed/mq/kafka/consumer_offset/memory_storage.go (1 changed line)

@@ -142,4 +142,3 @@ func (m *MemoryStorage) Close() error {
 	return nil
 }
-

weed/mq/kafka/consumer_offset/memory_storage_test.go (1 changed line)

@@ -206,4 +206,3 @@ func TestMemoryStorageOverwrite(t *testing.T) {
 	assert.Equal(t, int64(20), offset)
 	assert.Equal(t, "meta2", metadata)
 }
-

weed/mq/kafka/consumer_offset/storage.go (1 changed line)

@@ -56,4 +56,3 @@ var (
 	ErrInvalidPartition = fmt.Errorf("invalid partition")
 	ErrStorageClosed    = fmt.Errorf("storage is closed")
 )
-

weed/mq/kafka/gateway/test_mock_handler.go (1 changed line)

@@ -121,7 +121,6 @@ func (m *mockSeaweedMQHandler) ProduceRecord(ctx context.Context, topicName stri
 	offset := m.offsets[topicName][partitionID]
 	m.offsets[topicName][partitionID]++
-
 	// Store record
 	record := &mockRecord{
 		key: key,

weed/mq/kafka/package.go (2 changed lines)

@@ -9,5 +9,3 @@ package kafka
 // - offset/: Offset management
 // - schema/: Schema registry integration
 // - consumer/: Consumer group coordination
-
-

weed/mq/kafka/partition_mapping.go (2 changed lines)

@@ -51,5 +51,3 @@ func GetRangeSize() int32 {
 func GetMaxKafkaPartitions() int32 {
 	return int32(pub_balancer.MaxPartitionCount) / 35 // 72 partitions
 }
-
-

weed/mq/kafka/protocol/describe_cluster.go (2 changed lines)

@@ -37,7 +37,6 @@ func (h *Handler) handleDescribeCluster(correlationID uint32, apiVersion uint16,
 	// Tagged fields at end of request
 	// (We don't parse them, just skip)
-
 	// Build response
 	response := make([]byte, 0, 256)

@@ -109,6 +108,5 @@ func (h *Handler) handleDescribeCluster(correlationID uint32, apiVersion uint16,
 	// Response-level tagged fields (flexible response)
 	response = append(response, 0x00) // Empty tagged fields
-
 	return response, nil
 }

weed/mq/kafka/protocol/flexible_versions.go (1 changed line)

@@ -268,7 +268,6 @@ func parseCompactString(data []byte) ([]byte, int) {
 		return nil, 0
 	}
-
 	if actualLength == 0 {
 		// Empty string (length was 1)
 		return []byte{}, consumed
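
Context for the hunk above: in Kafka's flexible protocol versions, a COMPACT_STRING carries an unsigned-varint length stored as len+1, so a stored 0 means null and a stored 1 means the empty string, which is what the `actualLength == 0` branch handles. A minimal sketch of that decoding rule (hypothetical helper, not the repo's parseCompactString):

package example

import "encoding/binary"

// decodeCompactString reads a Kafka COMPACT_STRING: an unsigned varint
// holding len+1, followed by that many bytes. A stored length of 0 encodes
// null; 1 encodes the empty string.
func decodeCompactString(data []byte) (s string, consumed int, isNull bool) {
	raw, n := binary.Uvarint(data)
	if n <= 0 {
		return "", 0, true // malformed or empty varint
	}
	if raw == 0 {
		return "", n, true // 0 encodes a null string
	}
	length := int(raw - 1) // stored value is len+1
	if n+length > len(data) {
		return "", 0, true // truncated payload
	}
	return string(data[n : n+length]), n + length, false
}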

weed/mq/kafka/protocol/group_introspection.go (18 changed lines)

@@ -107,13 +107,13 @@ func (h *Handler) describeGroup(groupID string) DescribeGroupsGroup {
 	}

 	return DescribeGroupsGroup{
-		ErrorCode: 0,
-		GroupID: groupID,
-		State: stateStr,
-		ProtocolType: "consumer", // Default protocol type
-		Protocol: group.Protocol,
-		Members: members,
-		AuthorizedOps: []int32{}, // Empty for now
+		ErrorCode:     0,
+		GroupID:       groupID,
+		State:         stateStr,
+		ProtocolType:  "consumer", // Default protocol type
+		Protocol:      group.Protocol,
+		Members:       members,
+		AuthorizedOps: []int32{}, // Empty for now
 	}
 }

@@ -175,8 +175,8 @@ func (h *Handler) listAllGroups(statesFilter []string) []ListGroupsGroup {

 // Request/Response structures
 type DescribeGroupsRequest struct {
-	GroupIDs []string
-	IncludeAuthorizedOps bool
+	GroupIDs             []string
+	IncludeAuthorizedOps bool
 }

 type DescribeGroupsResponse struct {

weed/mq/kafka/protocol/handler.go (23 changed lines)

@@ -881,7 +881,6 @@ func (h *Handler) HandleConn(ctx context.Context, conn net.Conn) error {
 			return fmt.Errorf("read message: %w", err)
 		}
-
 		// Parse at least the basic header to get API key and correlation ID
 		if len(messageBuf) < 8 {
 			return fmt.Errorf("message too short")

@@ -1050,7 +1049,6 @@ func (h *Handler) processRequestSync(req *kafkaRequest) ([]byte, error) {
 	requestStart := time.Now()
 	apiName := getAPIName(APIKey(req.apiKey))
-
 	// Only log high-volume requests at V(2), not V(4)
 	if glog.V(2) {
 		glog.V(2).Infof("[API] %s (key=%d, ver=%d, corr=%d)",

@@ -1589,15 +1587,15 @@ func (h *Handler) HandleMetadataV2(correlationID uint32, requestBody []byte) ([]
 		for partitionID := int32(0); partitionID < partitionCount; partitionID++ {
 			binary.Write(&buf, binary.BigEndian, int16(0))    // ErrorCode
 			binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex
-			binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID
+			binary.Write(&buf, binary.BigEndian, nodeID)      // LeaderID

 			// ReplicaNodes array (4 bytes length + nodes)
 			binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica
-			binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1
+			binary.Write(&buf, binary.BigEndian, nodeID)   // NodeID 1

 			// IsrNodes array (4 bytes length + nodes)
 			binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node
-			binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1
+			binary.Write(&buf, binary.BigEndian, nodeID)   // NodeID 1
 		}
 	}

@@ -1716,15 +1714,15 @@ func (h *Handler) HandleMetadataV3V4(correlationID uint32, requestBody []byte) (
 		for partitionID := int32(0); partitionID < partitionCount; partitionID++ {
 			binary.Write(&buf, binary.BigEndian, int16(0))    // ErrorCode
 			binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex
-			binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID
+			binary.Write(&buf, binary.BigEndian, nodeID)      // LeaderID

 			// ReplicaNodes array (4 bytes length + nodes)
 			binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica
-			binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1
+			binary.Write(&buf, binary.BigEndian, nodeID)   // NodeID 1

 			// IsrNodes array (4 bytes length + nodes)
 			binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node
-			binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1
+			binary.Write(&buf, binary.BigEndian, nodeID)   // NodeID 1
 		}
 	}

@@ -1828,7 +1826,6 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte,
 	// NOTE: Correlation ID is handled by writeResponseWithCorrelationID
 	// Do NOT include it in the response body
-
 	// ThrottleTimeMs (4 bytes) - v3+ addition
 	binary.Write(&buf, binary.BigEndian, int32(0)) // No throttling

@@ -1896,7 +1893,7 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte,
 		for partitionID := int32(0); partitionID < partitionCount; partitionID++ {
 			binary.Write(&buf, binary.BigEndian, int16(0))    // ErrorCode
 			binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex
-			binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID
+			binary.Write(&buf, binary.BigEndian, nodeID)      // LeaderID

 			// LeaderEpoch (4 bytes) - v7+ addition
 			if apiVersion >= 7 {

@@ -1905,11 +1902,11 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte,
 			// ReplicaNodes array (4 bytes length + nodes)
 			binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica
-			binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1
+			binary.Write(&buf, binary.BigEndian, nodeID)   // NodeID 1

 			// IsrNodes array (4 bytes length + nodes)
 			binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node
-			binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1
+			binary.Write(&buf, binary.BigEndian, nodeID)   // NodeID 1

 			// OfflineReplicas array (4 bytes length + nodes) - v5+ addition
 			binary.Write(&buf, binary.BigEndian, int32(0)) // No offline replicas

@@ -1994,7 +1991,6 @@ func (h *Handler) handleListOffsets(correlationID uint32, apiVersion uint16, req
 	// Parse minimal request to understand what's being asked (header already stripped)
 	offset := 0
-
 	maxBytes := len(requestBody)
 	if maxBytes > 64 {
 		maxBytes = 64

@@ -3930,7 +3926,6 @@ func (h *Handler) handleInitProducerId(correlationID uint32, apiVersion uint16,
 	// v2+: transactional_id(NULLABLE_STRING) + transaction_timeout_ms(INT32) + producer_id(INT64) + producer_epoch(INT16)
 	// v4+: Uses flexible format with tagged fields
-
 	maxBytes := len(requestBody)
 	if maxBytes > 64 {
 		maxBytes = 64
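
The realigned binary.Write calls in the Metadata hunks all emit the same structure: one partition entry in the classic (non-flexible) Kafka Metadata response layout. Pulled out as a standalone sketch with the exact field order the hunks show, simplified to a single replica:

package example

import (
	"bytes"
	"encoding/binary"
)

// writePartitionEntry encodes one partition record: error code, partition
// index, leader ID, then length-prefixed replica and ISR node arrays.
func writePartitionEntry(buf *bytes.Buffer, partitionID, nodeID int32) {
	binary.Write(buf, binary.BigEndian, int16(0))    // ErrorCode
	binary.Write(buf, binary.BigEndian, partitionID) // PartitionIndex
	binary.Write(buf, binary.BigEndian, nodeID)      // LeaderID
	binary.Write(buf, binary.BigEndian, int32(1))    // ReplicaNodes count
	binary.Write(buf, binary.BigEndian, nodeID)      // the sole replica
	binary.Write(buf, binary.BigEndian, int32(1))    // IsrNodes count
	binary.Write(buf, binary.BigEndian, nodeID)      // the sole ISR member
}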

weed/mq/kafka/protocol/offset_storage_adapter.go (1 changed line)

@@ -47,4 +47,3 @@ func (a *offsetStorageAdapter) DeleteGroup(group string) error {
 func (a *offsetStorageAdapter) Close() error {
 	return a.storage.Close()
 }
-

weed/mq/kafka/protocol/response_validation_example_test.go (1 changed line)

@@ -140,4 +140,3 @@ func TestMetadataResponseHasBrokers(t *testing.T) {
 	t.Logf("✓ Metadata response correctly has %d broker(s)", parsedCount)
 }
-

weed/mq/kafka/schema/envelope_test.go (52 changed lines)

@@ -7,46 +7,46 @@ import (
 func TestParseConfluentEnvelope(t *testing.T) {
 	tests := []struct {
-		name string
-		input []byte
-		expectOK bool
-		expectID uint32
+		name         string
+		input        []byte
+		expectOK     bool
+		expectID     uint32
 		expectFormat Format
 	}{
 		{
-			name: "valid Avro message",
-			input: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, // schema ID 1 + "Hello"
-			expectOK: true,
-			expectID: 1,
+			name:         "valid Avro message",
+			input:        []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, // schema ID 1 + "Hello"
+			expectOK:     true,
+			expectID:     1,
 			expectFormat: FormatAvro,
 		},
 		{
-			name: "valid message with larger schema ID",
-			input: []byte{0x00, 0x00, 0x00, 0x04, 0xd2, 0x02, 0x66, 0x6f, 0x6f}, // schema ID 1234 + "foo"
-			expectOK: true,
-			expectID: 1234,
+			name:         "valid message with larger schema ID",
+			input:        []byte{0x00, 0x00, 0x00, 0x04, 0xd2, 0x02, 0x66, 0x6f, 0x6f}, // schema ID 1234 + "foo"
+			expectOK:     true,
+			expectID:     1234,
 			expectFormat: FormatAvro,
 		},
 		{
-			name: "too short message",
-			input: []byte{0x00, 0x00, 0x00},
-			expectOK: false,
+			name:     "too short message",
+			input:    []byte{0x00, 0x00, 0x00},
+			expectOK: false,
 		},
 		{
-			name: "no magic byte",
-			input: []byte{0x01, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f},
-			expectOK: false,
+			name:     "no magic byte",
+			input:    []byte{0x01, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f},
+			expectOK: false,
 		},
 		{
-			name: "empty message",
-			input: []byte{},
-			expectOK: false,
+			name:     "empty message",
+			input:    []byte{},
+			expectOK: false,
 		},
 		{
-			name: "minimal valid message",
-			input: []byte{0x00, 0x00, 0x00, 0x00, 0x01}, // schema ID 1, empty payload
-			expectOK: true,
-			expectID: 1,
+			name:         "minimal valid message",
+			input:        []byte{0x00, 0x00, 0x00, 0x00, 0x01}, // schema ID 1, empty payload
+			expectOK:     true,
+			expectID:     1,
 			expectFormat: FormatAvro,
 		},
 	}

@@ -297,7 +297,7 @@ func TestEnvelopeMetadata(t *testing.T) {
 func BenchmarkParseConfluentEnvelope(b *testing.B) {
 	// Create a test message
 	testMsg := make([]byte, 1024)
-	testMsg[0] = 0x00 // Magic byte
+	testMsg[0] = 0x00                             // Magic byte
 	binary.BigEndian.PutUint32(testMsg[1:5], 123) // Schema ID

 	// Fill rest with dummy data
 	for i := 5; i < len(testMsg); i++ {
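
All of the vectors in this table follow the Confluent wire format: a 0x00 magic byte, a 4-byte big-endian schema ID, then the serialized payload (the 0x04, 0xd2 in the second case is 1234). A minimal parser consistent with the table (hypothetical helper, not the package's own function):

package example

import "encoding/binary"

// parseEnvelope splits a Confluent-framed message into schema ID and payload.
// Layout: magic byte 0x00 | 4-byte big-endian schema ID | payload.
func parseEnvelope(msg []byte) (schemaID uint32, payload []byte, ok bool) {
	if len(msg) < 5 || msg[0] != 0x00 {
		return 0, nil, false // too short, or missing magic byte
	}
	return binary.BigEndian.Uint32(msg[1:5]), msg[5:], true
}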

weed/mq/metadata_constants.go (2 changed lines)

@@ -17,5 +17,3 @@ const (
 	// Source file tracking for parquet deduplication
 	ExtendedAttrSources = "sources" // JSON-encoded list of source log files
 )
-
-

weed/mq/offset/migration.go (10 changed lines)

@@ -261,11 +261,11 @@ func CreateDatabase(dbPath string) (*sql.DB, error) {
 	// Configure SQLite for better performance
 	pragmas := []string{
-		"PRAGMA journal_mode=WAL", // Write-Ahead Logging for better concurrency
-		"PRAGMA synchronous=NORMAL", // Balance between safety and performance
-		"PRAGMA cache_size=10000", // Increase cache size
-		"PRAGMA foreign_keys=ON", // Enable foreign key constraints
-		"PRAGMA temp_store=MEMORY", // Store temporary tables in memory
+		"PRAGMA journal_mode=WAL",   // Write-Ahead Logging for better concurrency
+		"PRAGMA synchronous=NORMAL", // Balance between safety and performance
+		"PRAGMA cache_size=10000",   // Increase cache size
+		"PRAGMA foreign_keys=ON",    // Enable foreign key constraints
+		"PRAGMA temp_store=MEMORY",  // Store temporary tables in memory
 	}

 	for _, pragma := range pragmas {
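
The loop truncated at the end of this hunk presumably applies each pragma in turn; a minimal sketch of that pattern with database/sql (assumed shape, not the repo's exact code):

package example

import "database/sql"

// applyPragmas executes each PRAGMA against a freshly opened SQLite handle
// and aborts on the first failure, so a half-configured connection is never
// handed out.
func applyPragmas(db *sql.DB, pragmas []string) error {
	for _, pragma := range pragmas {
		if _, err := db.Exec(pragma); err != nil {
			return err
		}
	}
	return nil
}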

weed/mq/schema/flat_schema_utils_test.go (8 changed lines)

@@ -249,10 +249,10 @@ func TestValidateKeyColumns(t *testing.T) {
 // Helper function to check if string contains substring
 func contains(str, substr string) bool {
 	return len(str) >= len(substr) &&
-		(len(substr) == 0 || str[len(str)-len(substr):] == substr ||
-			str[:len(substr)] == substr ||
-			len(str) > len(substr) && (str[len(str)-len(substr)-1:len(str)-len(substr)] == " " || str[len(str)-len(substr)-1] == ' ') && str[len(str)-len(substr):] == substr ||
-			findInString(str, substr))
+		(len(substr) == 0 || str[len(str)-len(substr):] == substr ||
+			str[:len(substr)] == substr ||
+			len(str) > len(substr) && (str[len(str)-len(substr)-1:len(str)-len(substr)] == " " || str[len(str)-len(substr)-1] == ' ') && str[len(str)-len(substr):] == substr ||
+			findInString(str, substr))
 }

 func findInString(str, substr string) bool {

weed/pb/mq_agent_pb/publish_response_test.go (2 changed lines)

@@ -1,8 +1,8 @@
 package mq_agent_pb

 import (
+	"testing"
 	"google.golang.org/protobuf/proto"
-	"testing"
 )

 func TestPublishRecordResponseSerialization(t *testing.T) {

weed/pb/schema_pb/offset_test.go (6 changed lines)

@@ -1,8 +1,8 @@
 package schema_pb

 import (
+	"testing"
 	"google.golang.org/protobuf/proto"
-	"testing"
 )

 func TestOffsetTypeEnums(t *testing.T) {

@@ -34,8 +34,8 @@ func TestPartitionOffsetSerialization(t *testing.T) {
 			RangeStop:  31,
 			UnixTimeNs: 1234567890,
 		},
-		StartTsNs: 1234567890,
-		StartOffset: 42, // New field
+		StartTsNs:   1234567890,
+		StartOffset: 42, // New field
 	}

 	// Test proto marshaling/unmarshaling

weed/remote_storage/azure/azure_storage_client_test.go (18 changed lines)

@@ -229,22 +229,22 @@ func TestToMetadata(t *testing.T) {
 				s3_constants.AmzUserMetaPrefix + "789": []byte("value3"),
 			},
 			expected: map[string]*string{
-				"_123key": stringPtr("value1"), // starts with digit -> prefix _
-				"_456_2d_test": stringPtr("value2"), // starts with digit AND has dash
-				"_789": stringPtr("value3"),
+				"_123key":      stringPtr("value1"), // starts with digit -> prefix _
+				"_456_2d_test": stringPtr("value2"), // starts with digit AND has dash
+				"_789":         stringPtr("value3"),
 			},
 		},
 		{
 			name: "uppercase and mixed case keys",
 			input: map[string][]byte{
-				s3_constants.AmzUserMetaPrefix + "My-Key": []byte("value1"),
-				s3_constants.AmzUserMetaPrefix + "UPPERCASE": []byte("value2"),
-				s3_constants.AmzUserMetaPrefix + "MiXeD-CaSe": []byte("value3"),
+				s3_constants.AmzUserMetaPrefix + "My-Key":     []byte("value1"),
+				s3_constants.AmzUserMetaPrefix + "UPPERCASE":  []byte("value2"),
+				s3_constants.AmzUserMetaPrefix + "MiXeD-CaSe": []byte("value3"),
 			},
 			expected: map[string]*string{
-				"my_2d_key": stringPtr("value1"), // lowercase + dash -> _2d_
-				"uppercase": stringPtr("value2"),
-				"mixed_2d_case": stringPtr("value3"),
+				"my_2d_key":     stringPtr("value1"), // lowercase + dash -> _2d_
+				"uppercase":     stringPtr("value2"),
+				"mixed_2d_case": stringPtr("value3"),
 			},
 		},
 		{
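
The expectations in this table encode Azure's metadata-key restriction (keys must be valid identifiers): lowercase everything, escape '-' as its hex code wrapped in underscores (0x2d is ASCII '-'), and prefix keys that would otherwise start with a digit. A sketch of the rule exactly as the test table implies it (hypothetical helper, not the client's real function):

package example

import (
	"strings"
	"unicode"
)

// sanitizeAzureKey rewrites an S3 user-metadata key into a legal Azure
// metadata identifier, mirroring the test expectations above: lowercase,
// '-' escaped to "_2d_", and a '_' prefix for keys starting with a digit.
func sanitizeAzureKey(key string) string {
	escaped := strings.ReplaceAll(strings.ToLower(key), "-", "_2d_")
	if len(escaped) > 0 && unicode.IsDigit(rune(escaped[0])) {
		return "_" + escaped
	}
	return escaped
}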

weed/util/log_buffer/log_buffer_flush_gap_test.go (10 changed lines)

@@ -15,10 +15,11 @@ import (
 // are lost in the gap between flushed disk data and in-memory buffer.
 //
 // OBSERVED BEHAVIOR FROM LOGS:
-// Request offset: 1764
-// Disk contains: 1000-1763 (764 messages)
-// Memory buffer starts at: 1800
-// Gap: 1764-1799 (36 messages) ← MISSING!
+//
+//	Request offset: 1764
+//	Disk contains: 1000-1763 (764 messages)
+//	Memory buffer starts at: 1800
+//	Gap: 1764-1799 (36 messages) ← MISSING!
 //
 // This test verifies:
 // 1. All messages sent to buffer are accounted for

@@ -677,4 +678,3 @@ func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) {
 	}
 	flushMu.Unlock()
 }
-
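
The comment rewrite in the first hunk looks like gofmt's doc-comment normalization from Go 1.19 onward: lines intended as a preformatted block gain an empty // separator and a leading tab, so godoc renders them verbatim instead of reflowing them into one paragraph. The rule on a hypothetical comment:

package example

// Flush gaps, illustrated. Before Go 1.19, tables like this were often
// written flush against the comment markers and godoc reflowed them into
// prose. gofmt now rewrites them with a blank "//" line and tab indentation:
//
//	Request offset: 1764
//	Disk contains:  1000-1763
//	Gap:            1764-1799
func Example() {}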

weed/worker/client.go (2 changed lines)

@@ -24,7 +24,7 @@ type GrpcAdminClient struct {
 	workerID   string
 	dialOption grpc.DialOption
-	cmds chan grpcCommand
+	cmds       chan grpcCommand

 	// Reconnection parameters
 	maxReconnectAttempts int
