mq(kafka): Major debugging progress on Metadata v7 compatibility
BREAKTHROUGH DISCOVERIES:
✅ Performance issue SOLVED: Debug logging was causing 6.8s delays → now 20μs
✅ Metadata v7 format partially working: kafka-go accepts response (no disconnect)
✅ kafka-go workflow confirmed: Never calls Produce API - validates Metadata first

CURRENT ISSUE IDENTIFIED:
❌ kafka-go validates Metadata response → returns '[3] Unknown Topic Or Partition'
❌ Error comes from kafka-go's internal validation, not our API handlers
❌ kafka-go retries with more Metadata requests (normal retry behavior)

DEBUGGING IMPLEMENTED:
- Added comprehensive API request logging to confirm request flow
- Added detailed Produce API debugging (unused but ready)
- Added Metadata response hex dumps for format validation
- Confirmed no unsupported API calls being made

METADATA V7 COMPLIANCE:
✅ Added cluster authorized operations field
✅ Added topic UUID fields (16-byte null UUID)
✅ Added is_internal_topic field
✅ Added topic authorized operations field
✅ Response format appears correct (120 bytes)

NEXT: Debug why kafka-go rejects our otherwise well-formed Metadata v7 response. Likely broker address mismatch, partition state issue, or missing v7 field.
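As a rough illustration of the METADATA V7 COMPLIANCE items above, the sketch below shows one way the extra per-topic fields could be serialized with Go's encoding/binary, assuming Kafka's big-endian wire encoding. The writeTopicV7Fields helper and its field layout follow the commit message wording only; they are not the handler.go code in this commit nor the official protocol definition.

package protocol

import (
	"bytes"
	"encoding/binary"
)

// writeTopicV7Fields is a hypothetical helper sketching how the extra per-topic
// fields named in the commit message could be appended to a Metadata response:
// a 16-byte null topic UUID, the is_internal_topic flag, and a topic
// authorized-operations bitmask.
func writeTopicV7Fields(buf *bytes.Buffer, isInternal bool) {
	// 16-byte null UUID (all zeros).
	var nullUUID [16]byte
	buf.Write(nullUUID[:])

	// is_internal_topic encoded as a single-byte boolean.
	if isInternal {
		buf.WriteByte(1)
	} else {
		buf.WriteByte(0)
	}

	// Topic authorized operations as a big-endian int32 bitmask;
	// INT32_MIN is the conventional "not provided" sentinel.
	_ = binary.Write(buf, binary.BigEndian, int32(-2147483648))
}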
4 changed files with 191 additions and 52 deletions
67	test/kafka/api_sequence_test.go
6	test/kafka/produce_consume_test.go
108	weed/mq/kafka/protocol/handler.go
16	weed/mq/kafka/protocol/produce.go
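Most of the 108-line change to weed/mq/kafka/protocol/handler.go is debug instrumentation; the commit message mentions hex-dumping the Metadata response for format validation. A minimal sketch of what such a dump might look like using only the standard library (the dumpMetadataResponse helper is hypothetical, not the code in this commit):

package protocol

import (
	"encoding/hex"
	"log"
)

// dumpMetadataResponse logs the size and a hex/ASCII dump of an encoded
// Metadata response so the byte layout can be checked against the Kafka
// protocol specification by hand.
func dumpMetadataResponse(apiVersion int16, response []byte) {
	log.Printf("Metadata v%d response (%d bytes):\n%s", apiVersion, len(response), hex.Dump(response))
}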
test/kafka/api_sequence_test.go
@@ -0,0 +1,67 @@
package kafka

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/mq/kafka/gateway"
	"github.com/segmentio/kafka-go"
)

// TestKafkaGateway_APISequence logs all API requests that kafka-go makes
func TestKafkaGateway_APISequence(t *testing.T) {
	// Start the gateway server
	srv := gateway.NewServer(gateway.Options{
		Listen:       ":0",
		UseSeaweedMQ: false,
	})

	if err := srv.Start(); err != nil {
		t.Fatalf("Failed to start gateway: %v", err)
	}
	defer srv.Close()

	brokerAddr := srv.Addr()
	t.Logf("Gateway running on %s", brokerAddr)

	// Pre-create topic
	topicName := "api-sequence-topic"
	handler := srv.GetHandler()
	handler.AddTopicForTesting(topicName, 1)

	// Create a writer and try to write a single message
	writer := &kafka.Writer{
		Addr:         kafka.TCP(brokerAddr),
		Topic:        topicName,
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
		Logger: kafka.LoggerFunc(func(msg string, args ...interface{}) {
			fmt.Printf("KAFKA-GO WRITER LOG: "+msg+"\n", args...)
		}),
		ErrorLogger: kafka.LoggerFunc(func(msg string, args ...interface{}) {
			fmt.Printf("KAFKA-GO WRITER ERROR: "+msg+"\n", args...)
		}),
	}
	defer writer.Close()

	// Try to write a single message and log the full API sequence
	ctx, cancel := context.WithTimeout(context.Background(), 12*time.Second)
	defer cancel()

	fmt.Printf("\n=== STARTING kafka-go WRITE ATTEMPT ===\n")

	err := writer.WriteMessages(ctx, kafka.Message{
		Key:   []byte("test-key"),
		Value: []byte("test-value"),
	})

	fmt.Printf("\n=== kafka-go WRITE COMPLETED ===\n")

	if err != nil {
		t.Logf("WriteMessages result: %v", err)
	} else {
		t.Logf("WriteMessages succeeded!")
	}
}
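To reproduce the API-sequence trace locally, the new test can be run on its own from the repository root (assuming the usual Go module layout implied by the file path above):

	go test -v -run TestKafkaGateway_APISequence ./test/kafka/

The kafka-go Logger and ErrorLogger hooks print the writer-side events, which together with the gateway's request logging show the exact Metadata (and, if reached, Produce) sequence that kafka-go attempts.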