
protocol: align request parsing with Kafka specs; remove client_id skips; revert OffsetFetch v0-v5 to classic encodings; adjust FindCoordinator parsing; update ApiVersions Metadata max v7; fix tests to pass apiVersion and expectations

pull/7231/head
chrislu committed 2 months ago
commit 48a0b49880
1. weed/mq/kafka/protocol/fetch_test.go (172 lines changed)
2. weed/mq/kafka/protocol/handler.go (4 lines changed)
3. weed/mq/kafka/protocol/handler_test.go (275 lines changed)
4. weed/mq/kafka/protocol/produce_test.go (128 lines changed)
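
The updated tests below pass the request's apiVersion directly into the per-API handlers (for example handleFetch(correlationID, 7, requestBody)). As a rough, hypothetical sketch of where that value comes from, the following shows how the classic (non-flexible) Kafka request header (api_key, api_version, correlation_id, nullable client_id) can be parsed before the API-specific body is handed off. The names parseRequestHeader and requestHeader are illustrative only and are not taken from this repository.

package main

import (
	"encoding/binary"
	"fmt"
)

// requestHeader mirrors the classic (non-flexible) Kafka request header:
// api_key(INT16) + api_version(INT16) + correlation_id(INT32) + client_id(NULLABLE_STRING).
type requestHeader struct {
	apiKey        uint16
	apiVersion    uint16
	correlationID uint32
	clientID      string
}

// parseRequestHeader consumes the header and returns the remaining API-specific body.
func parseRequestHeader(msg []byte) (requestHeader, []byte, error) {
	if len(msg) < 10 {
		return requestHeader{}, nil, fmt.Errorf("message too short for request header")
	}
	hdr := requestHeader{
		apiKey:        binary.BigEndian.Uint16(msg[0:2]),
		apiVersion:    binary.BigEndian.Uint16(msg[2:4]),
		correlationID: binary.BigEndian.Uint32(msg[4:8]),
	}
	clientIDLen := int16(binary.BigEndian.Uint16(msg[8:10])) // -1 encodes a null client_id
	body := msg[10:]
	if clientIDLen > 0 {
		if len(body) < int(clientIDLen) {
			return requestHeader{}, nil, fmt.Errorf("truncated client_id")
		}
		hdr.clientID = string(body[:clientIDLen])
		body = body[clientIDLen:]
	}
	return hdr, body, nil
}

func main() {
	// Fetch (api_key=1) at version 7, correlation_id=666, client_id="test-consumer", empty body.
	msg := []byte{0, 1, 0, 7, 0, 0, 2, 0x9A, 0, 13}
	msg = append(msg, []byte("test-consumer")...)
	hdr, body, err := parseRequestHeader(msg)
	if err != nil {
		panic(err)
	}
	// A version-aware dispatcher would now call something like
	// h.handleFetch(hdr.correlationID, hdr.apiVersion, body).
	fmt.Println(hdr.apiKey, hdr.apiVersion, hdr.correlationID, hdr.clientID, len(body))
}

This is only meant to illustrate where the apiVersion argument in the updated test calls comes from; the actual header handling in handler.go may differ.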

weed/mq/kafka/protocol/fetch_test.go (172 lines changed)

@@ -9,7 +9,7 @@ import (
func TestHandler_handleFetch(t *testing.T) {
h := NewHandler()
correlationID := uint32(666)
// Create a topic and add some records
topicName := "test-topic"
h.topics[topicName] = &TopicInfo{
@@ -17,7 +17,7 @@ func TestHandler_handleFetch(t *testing.T) {
Partitions: 1,
CreatedAt: time.Now().UnixNano(),
}
// Add some records to the ledger
ledger := h.GetOrCreateLedger(topicName, 0)
baseOffset := ledger.AssignOffsets(3)
@@ -25,139 +25,139 @@ func TestHandler_handleFetch(t *testing.T) {
ledger.AppendRecord(baseOffset+0, currentTime+0, 100)
ledger.AppendRecord(baseOffset+1, currentTime+1000, 200)
ledger.AppendRecord(baseOffset+2, currentTime+2000, 150)
// Build a Fetch request
clientID := "test-consumer"
requestBody := make([]byte, 0, 256)
// Client ID
requestBody = append(requestBody, 0, byte(len(clientID)))
requestBody = append(requestBody, []byte(clientID)...)
// Replica ID (-1 for consumer)
requestBody = append(requestBody, 0xFF, 0xFF, 0xFF, 0xFF)
// Max wait time (5000ms)
requestBody = append(requestBody, 0, 0, 0x13, 0x88)
// Min bytes (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Max bytes (1MB)
requestBody = append(requestBody, 0, 0x10, 0, 0)
// Isolation level (0 = read uncommitted)
requestBody = append(requestBody, 0)
// Session ID (0)
requestBody = append(requestBody, 0, 0, 0, 0)
// Epoch (0)
requestBody = append(requestBody, 0, 0, 0, 0)
// Topics count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Topic name
requestBody = append(requestBody, 0, byte(len(topicName)))
requestBody = append(requestBody, []byte(topicName)...)
// Partitions count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Partition 0
requestBody = append(requestBody, 0, 0, 0, 0) // partition ID
requestBody = append(requestBody, 0, 0, 0, 0) // current leader epoch
requestBody = append(requestBody, 0, 0, 0, 0, 0, 0, 0, byte(baseOffset)) // fetch offset
requestBody = append(requestBody, 0, 0, 0, 0, 0, 0, 0, 0) // log start offset
requestBody = append(requestBody, 0, 0, 0x10, 0) // partition max bytes (1MB)
- response, err := h.handleFetch(correlationID, requestBody)
+ response, err := h.handleFetch(correlationID, 7, requestBody)
if err != nil {
t.Fatalf("handleFetch: %v", err)
}
if len(response) < 60 { // minimum expected size
t.Fatalf("response too short: %d bytes", len(response))
}
// Check response structure
respCorrelationID := binary.BigEndian.Uint32(response[0:4])
if respCorrelationID != correlationID {
t.Errorf("correlation ID: got %d, want %d", respCorrelationID, correlationID)
}
// Check throttle time
throttleTime := binary.BigEndian.Uint32(response[4:8])
if throttleTime != 0 {
t.Errorf("throttle time: got %d, want 0", throttleTime)
}
// Check error code
errorCode := binary.BigEndian.Uint16(response[8:10])
if errorCode != 0 {
t.Errorf("error code: got %d, want 0", errorCode)
}
// Parse response structure (simplified validation)
offset := 14 // skip correlation_id + throttle_time + error_code + session_id
topicsCount := binary.BigEndian.Uint32(response[offset : offset+4])
if topicsCount != 1 {
t.Errorf("topics count: got %d, want 1", topicsCount)
}
offset += 4
respTopicNameSize := binary.BigEndian.Uint16(response[offset : offset+2])
offset += 2
if respTopicNameSize != uint16(len(topicName)) {
t.Errorf("response topic name size: got %d, want %d", respTopicNameSize, len(topicName))
}
respTopicName := string(response[offset : offset+int(respTopicNameSize)])
offset += int(respTopicNameSize)
if respTopicName != topicName {
t.Errorf("response topic name: got %s, want %s", respTopicName, topicName)
}
// Partitions count
respPartitionsCount := binary.BigEndian.Uint32(response[offset : offset+4])
offset += 4
if respPartitionsCount != 1 {
t.Errorf("response partitions count: got %d, want 1", respPartitionsCount)
}
// Partition ID
partitionID := binary.BigEndian.Uint32(response[offset : offset+4])
offset += 4
if partitionID != 0 {
t.Errorf("partition ID: got %d, want 0", partitionID)
}
// Partition error code
partitionErrorCode := binary.BigEndian.Uint16(response[offset : offset+2])
offset += 2
if partitionErrorCode != 0 {
t.Errorf("partition error code: got %d, want 0", partitionErrorCode)
}
// High water mark
highWaterMark := int64(binary.BigEndian.Uint64(response[offset : offset+8]))
offset += 8
if highWaterMark != 3 { // baseOffset + 3 records
t.Errorf("high water mark: got %d, want %d", highWaterMark, baseOffset+3)
}
// Skip last_stable_offset, log_start_offset, aborted_transactions_count
offset += 8 + 8 + 4
// Records size
recordsSize := binary.BigEndian.Uint32(response[offset : offset+4])
offset += 4
if recordsSize == 0 {
t.Errorf("expected some records, got size 0")
}
// Verify we have records data
if len(response) < offset+int(recordsSize) {
t.Errorf("response shorter than expected records size")
@@ -167,48 +167,48 @@ func TestHandler_handleFetch(t *testing.T) {
func TestHandler_handleFetch_UnknownTopic(t *testing.T) {
h := NewHandler()
correlationID := uint32(777)
// Build Fetch request for non-existent topic
clientID := "test-consumer"
topicName := "non-existent-topic"
requestBody := make([]byte, 0, 128)
// Client ID
requestBody = append(requestBody, 0, byte(len(clientID)))
requestBody = append(requestBody, []byte(clientID)...)
// Standard Fetch parameters
requestBody = append(requestBody, 0xFF, 0xFF, 0xFF, 0xFF) // replica ID
requestBody = append(requestBody, 0, 0, 0x13, 0x88) // max wait time
requestBody = append(requestBody, 0, 0, 0, 1) // min bytes
requestBody = append(requestBody, 0, 0x10, 0, 0) // max bytes
requestBody = append(requestBody, 0) // isolation level
requestBody = append(requestBody, 0, 0, 0, 0) // session ID
requestBody = append(requestBody, 0, 0, 0, 0) // epoch
// Topics count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Topic name
requestBody = append(requestBody, 0, byte(len(topicName)))
requestBody = append(requestBody, []byte(topicName)...)
// Partitions count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Partition 0
requestBody = append(requestBody, 0, 0, 0, 0) // partition ID
requestBody = append(requestBody, 0, 0, 0, 0) // current leader epoch
requestBody = append(requestBody, 0, 0, 0, 0, 0, 0, 0, 0) // fetch offset
requestBody = append(requestBody, 0, 0, 0, 0, 0, 0, 0, 0) // log start offset
requestBody = append(requestBody, 0, 0, 0x10, 0) // partition max bytes
- response, err := h.handleFetch(correlationID, requestBody)
+ response, err := h.handleFetch(correlationID, 7, requestBody)
if err != nil {
t.Fatalf("handleFetch: %v", err)
}
// Parse response to check for UNKNOWN_TOPIC_OR_PARTITION error
offset := 14 + 4 + 2 + len(topicName) + 4 + 4 // skip to partition error code
partitionErrorCode := binary.BigEndian.Uint16(response[offset : offset+2])
@@ -220,7 +220,7 @@ func TestHandler_handleFetch_UnknownTopic(t *testing.T) {
func TestHandler_handleFetch_EmptyPartition(t *testing.T) {
h := NewHandler()
correlationID := uint32(888)
// Create a topic but don't add any records
topicName := "empty-topic"
h.topics[topicName] = &TopicInfo{
@@ -228,65 +228,65 @@ func TestHandler_handleFetch_EmptyPartition(t *testing.T) {
Partitions: 1,
CreatedAt: time.Now().UnixNano(),
}
// Get ledger but don't add records
ledger := h.GetOrCreateLedger(topicName, 0)
_ = ledger // ledger exists but is empty
// Build Fetch request
clientID := "test-consumer"
requestBody := make([]byte, 0, 128)
// Client ID
requestBody = append(requestBody, 0, byte(len(clientID)))
requestBody = append(requestBody, []byte(clientID)...)
// Standard parameters
requestBody = append(requestBody, 0xFF, 0xFF, 0xFF, 0xFF) // replica ID
requestBody = append(requestBody, 0, 0, 0x13, 0x88) // max wait time
requestBody = append(requestBody, 0, 0, 0, 1) // min bytes
requestBody = append(requestBody, 0, 0x10, 0, 0) // max bytes
requestBody = append(requestBody, 0) // isolation level
requestBody = append(requestBody, 0, 0, 0, 0) // session ID
requestBody = append(requestBody, 0, 0, 0, 0) // epoch
// Topics count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Topic name
requestBody = append(requestBody, 0, byte(len(topicName)))
requestBody = append(requestBody, []byte(topicName)...)
// Partitions count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Partition 0 - fetch from offset 0
requestBody = append(requestBody, 0, 0, 0, 0) // partition ID
requestBody = append(requestBody, 0, 0, 0, 0) // current leader epoch
requestBody = append(requestBody, 0, 0, 0, 0, 0, 0, 0, 0) // fetch offset
requestBody = append(requestBody, 0, 0, 0, 0, 0, 0, 0, 0) // log start offset
requestBody = append(requestBody, 0, 0, 0x10, 0) // partition max bytes
response, err := h.handleFetch(correlationID, requestBody)
response, err := h.handleFetch(correlationID, 7, requestBody)
if err != nil {
t.Fatalf("handleFetch: %v", err)
}
// Parse response - should have no error but empty records
offset := 14 + 4 + 2 + len(topicName) + 4 + 4 // skip to partition error code
partitionErrorCode := binary.BigEndian.Uint16(response[offset : offset+2])
if partitionErrorCode != 0 {
t.Errorf("partition error code: got %d, want 0", partitionErrorCode)
}
// High water mark should be 0
offset += 2
highWaterMark := int64(binary.BigEndian.Uint64(response[offset : offset+8]))
if highWaterMark != 0 {
t.Errorf("high water mark: got %d, want 0", highWaterMark)
}
// Skip to records size
offset += 8 + 8 + 4 // skip last_stable_offset, log_start_offset, aborted_transactions_count
recordsSize := binary.BigEndian.Uint32(response[offset : offset+4])
@@ -297,41 +297,41 @@ func TestHandler_handleFetch_EmptyPartition(t *testing.T) {
func TestHandler_constructRecordBatch(t *testing.T) {
h := NewHandler()
// Test with simple parameters
records := h.constructRecordBatch(nil, 0, 3)
if len(records) == 0 {
t.Errorf("expected some records, got empty")
}
// Should have proper record batch structure
if len(records) < 61 { // minimum record batch header size
t.Errorf("record batch too small: %d bytes", len(records))
}
// Check base offset
baseOffset := int64(binary.BigEndian.Uint64(records[0:8]))
if baseOffset != 0 {
t.Errorf("base offset: got %d, want 0", baseOffset)
}
// Check magic byte
if records[16] != 2 {
t.Errorf("magic byte: got %d, want 2", records[16])
}
// Test with no records to fetch
emptyRecords := h.constructRecordBatch(nil, 5, 5)
if len(emptyRecords) != 0 {
t.Errorf("expected empty records, got %d bytes", len(emptyRecords))
}
// Test with large range (should be limited)
largeRecords := h.constructRecordBatch(nil, 0, 100)
if len(largeRecords) == 0 {
t.Errorf("expected some records for large range")
}
// Should be limited to reasonable size
if len(largeRecords) > 10000 { // arbitrary reasonable limit
t.Errorf("record batch too large: %d bytes", len(largeRecords))

weed/mq/kafka/protocol/handler.go (4 lines changed)

@@ -429,7 +429,7 @@ func (h *Handler) handleApiVersions(correlationID uint32) ([]byte, error) {
// Sarama works with v4, kafka-go should also work with v4
response = append(response, 0, 3) // API key 3
response = append(response, 0, 0) // min version 0
- response = append(response, 0, 4) // max version 4 (was 6)
+ response = append(response, 0, 7) // max version 7
// API Key 2 (ListOffsets): api_key(2) + min_version(2) + max_version(2)
response = append(response, 0, 2) // API key 2
@@ -1152,7 +1152,7 @@ func (h *Handler) handleListOffsets(correlationID uint32, apiVersion uint16, req
return nil, fmt.Errorf("ListOffsets request too short")
}
- // Skip client_id: client_id_size(2) + client_id_data
+ // Skip client_id: client_id_size(2) + topics_count(4)
clientIDSize := binary.BigEndian.Uint16(requestBody[0:2])
offset := 2 + int(clientIDSize)

weed/mq/kafka/protocol/handler_test.go (275 lines changed)

@@ -92,20 +92,20 @@ func TestHandler_ApiVersions(t *testing.T) {
// Check number of API keys
numAPIKeys := binary.BigEndian.Uint32(respBuf[6:10])
- if numAPIKeys != 13 {
- t.Errorf("expected 13 API keys, got: %d", numAPIKeys)
+ if numAPIKeys != 14 {
+ t.Errorf("expected 14 API keys, got: %d", numAPIKeys)
}
// Check API key details: api_key(2) + min_version(2) + max_version(2)
if len(respBuf) < 52 { // need space for 7 API keys
t.Fatalf("response too short for API key data")
}
// First API key (ApiVersions)
apiKey := binary.BigEndian.Uint16(respBuf[10:12])
minVersion := binary.BigEndian.Uint16(respBuf[12:14])
maxVersion := binary.BigEndian.Uint16(respBuf[14:16])
if apiKey != 18 {
t.Errorf("expected API key 18, got: %d", apiKey)
}
@@ -115,12 +115,12 @@ func TestHandler_ApiVersions(t *testing.T) {
if maxVersion != 3 {
t.Errorf("expected max version 3, got: %d", maxVersion)
}
// Second API key (Metadata)
apiKey2 := binary.BigEndian.Uint16(respBuf[16:18])
minVersion2 := binary.BigEndian.Uint16(respBuf[18:20])
maxVersion2 := binary.BigEndian.Uint16(respBuf[20:22])
if apiKey2 != 3 {
t.Errorf("expected API key 3, got: %d", apiKey2)
}
@@ -130,12 +130,12 @@ func TestHandler_ApiVersions(t *testing.T) {
if maxVersion2 != 7 {
t.Errorf("expected max version 7, got: %d", maxVersion2)
}
// Third API key (ListOffsets)
apiKey3 := binary.BigEndian.Uint16(respBuf[22:24])
minVersion3 := binary.BigEndian.Uint16(respBuf[24:26])
maxVersion3 := binary.BigEndian.Uint16(respBuf[26:28])
if apiKey3 != 2 {
t.Errorf("expected API key 2, got: %d", apiKey3)
}
@@ -145,12 +145,12 @@ func TestHandler_ApiVersions(t *testing.T) {
if maxVersion3 != 5 {
t.Errorf("expected max version 5, got: %d", maxVersion3)
}
// Fourth API key (CreateTopics)
apiKey4 := binary.BigEndian.Uint16(respBuf[28:30])
minVersion4 := binary.BigEndian.Uint16(respBuf[30:32])
maxVersion4 := binary.BigEndian.Uint16(respBuf[32:34])
if apiKey4 != 19 {
t.Errorf("expected API key 19, got: %d", apiKey4)
}
@@ -160,12 +160,12 @@ func TestHandler_ApiVersions(t *testing.T) {
if maxVersion4 != 4 {
t.Errorf("expected max version 4, got: %d", maxVersion4)
}
// Fifth API key (DeleteTopics)
apiKey5 := binary.BigEndian.Uint16(respBuf[34:36])
minVersion5 := binary.BigEndian.Uint16(respBuf[36:38])
maxVersion5 := binary.BigEndian.Uint16(respBuf[38:40])
if apiKey5 != 20 {
t.Errorf("expected API key 20, got: %d", apiKey5)
}
@@ -175,12 +175,12 @@ func TestHandler_ApiVersions(t *testing.T) {
if maxVersion5 != 4 {
t.Errorf("expected max version 4, got: %d", maxVersion5)
}
// Sixth API key (Produce)
apiKey6 := binary.BigEndian.Uint16(respBuf[40:42])
minVersion6 := binary.BigEndian.Uint16(respBuf[42:44])
maxVersion6 := binary.BigEndian.Uint16(respBuf[44:46])
if apiKey6 != 0 {
t.Errorf("expected API key 0, got: %d", apiKey6)
}
@@ -190,12 +190,12 @@ func TestHandler_ApiVersions(t *testing.T) {
if maxVersion6 != 7 {
t.Errorf("expected max version 7, got: %d", maxVersion6)
}
// Seventh API key (Fetch)
apiKey7 := binary.BigEndian.Uint16(respBuf[46:48])
minVersion7 := binary.BigEndian.Uint16(respBuf[48:50])
maxVersion7 := binary.BigEndian.Uint16(respBuf[50:52])
if apiKey7 != 1 {
t.Errorf("expected API key 1, got: %d", apiKey7)
}
@@ -223,266 +223,135 @@ func TestHandler_ApiVersions(t *testing.T) {
func TestHandler_handleApiVersions(t *testing.T) {
h := NewHandler()
correlationID := uint32(999)
response, err := h.handleApiVersions(correlationID)
if err != nil {
t.Fatalf("handleApiVersions: %v", err)
}
if len(response) < 90 { // minimum expected size (now has 13 API keys)
t.Fatalf("response too short: %d bytes", len(response))
}
// Check correlation ID
respCorrelationID := binary.BigEndian.Uint32(response[0:4])
if respCorrelationID != correlationID {
t.Errorf("correlation ID: got %d, want %d", respCorrelationID, correlationID)
}
// Check error code
errorCode := binary.BigEndian.Uint16(response[4:6])
if errorCode != 0 {
t.Errorf("error code: got %d, want 0", errorCode)
}
// Check number of API keys
numAPIKeys := binary.BigEndian.Uint32(response[6:10])
if numAPIKeys != 13 {
t.Errorf("expected 13 API keys, got: %d", numAPIKeys)
}
// Check first API key (ApiVersions)
apiKey := binary.BigEndian.Uint16(response[10:12])
if apiKey != 18 {
t.Errorf("first API key: got %d, want 18", apiKey)
}
// Check second API key (Metadata)
apiKey2 := binary.BigEndian.Uint16(response[16:18])
if apiKey2 != 3 {
t.Errorf("second API key: got %d, want 3", apiKey2)
}
// Check third API key (ListOffsets)
apiKey3 := binary.BigEndian.Uint16(response[22:24])
if apiKey3 != 2 {
t.Errorf("third API key: got %d, want 2", apiKey3)
}
// Check fourth API key (CreateTopics)
apiKey4 := binary.BigEndian.Uint16(response[28:30])
if apiKey4 != 19 {
t.Errorf("fourth API key: got %d, want 19", apiKey4)
}
// Check fifth API key (DeleteTopics)
apiKey5 := binary.BigEndian.Uint16(response[34:36])
if apiKey5 != 20 {
t.Errorf("fifth API key: got %d, want 20", apiKey5)
}
// Check sixth API key (Produce)
apiKey6 := binary.BigEndian.Uint16(response[40:42])
if apiKey6 != 0 {
t.Errorf("sixth API key: got %d, want 0", apiKey6)
}
// Check seventh API key (Fetch)
apiKey7 := binary.BigEndian.Uint16(response[46:48])
if apiKey7 != 1 {
t.Errorf("seventh API key: got %d, want 1", apiKey7)
}
}
func TestHandler_handleMetadata(t *testing.T) {
h := NewHandler()
correlationID := uint32(456)
// Empty request body for minimal test
requestBody := []byte{}
- response, err := h.handleMetadata(correlationID, requestBody)
+ response, err := h.handleMetadata(correlationID, 0, requestBody)
if err != nil {
t.Fatalf("handleMetadata: %v", err)
}
- if len(response) < 40 { // minimum expected size
+ if len(response) < 31 { // minimum expected size for v0 (calculated)
t.Fatalf("response too short: %d bytes", len(response))
}
// Check correlation ID
respCorrelationID := binary.BigEndian.Uint32(response[0:4])
if respCorrelationID != correlationID {
t.Errorf("correlation ID: got %d, want %d", respCorrelationID, correlationID)
}
// Check throttle time
throttleTime := binary.BigEndian.Uint32(response[4:8])
if throttleTime != 0 {
t.Errorf("throttle time: got %d, want 0", throttleTime)
}
// Check brokers count
- brokersCount := binary.BigEndian.Uint32(response[8:12])
+ brokersCount := binary.BigEndian.Uint32(response[4:8])
if brokersCount != 1 {
t.Errorf("brokers count: got %d, want 1", brokersCount)
}
// Check first broker node ID
nodeID := binary.BigEndian.Uint32(response[12:16])
if nodeID != 0 {
t.Errorf("broker node ID: got %d, want 0", nodeID)
}
// Check host string length
hostLen := binary.BigEndian.Uint16(response[16:18])
expectedHost := "localhost"
if hostLen != uint16(len(expectedHost)) {
t.Errorf("host length: got %d, want %d", hostLen, len(expectedHost))
}
// Check host string
if string(response[18:18+hostLen]) != expectedHost {
t.Errorf("host: got %s, want %s", string(response[18:18+hostLen]), expectedHost)
}
// Check port
portStart := 18 + int(hostLen)
port := binary.BigEndian.Uint32(response[portStart:portStart+4])
if port != 9092 {
t.Errorf("port: got %d, want 9092", port)
}
}
func TestHandler_handleListOffsets(t *testing.T) {
h := NewHandler()
correlationID := uint32(123)
// Build a simple ListOffsets request: client_id + topics
// client_id_size(2) + client_id + topics_count(4) + topic + partitions
clientID := "test"
topic := "test-topic"
requestBody := make([]byte, 0, 64)
// Client ID
requestBody = append(requestBody, 0, byte(len(clientID)))
requestBody = append(requestBody, []byte(clientID)...)
// Topics count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Topic name
requestBody = append(requestBody, 0, byte(len(topic)))
requestBody = append(requestBody, []byte(topic)...)
// Partitions count (2 partitions)
requestBody = append(requestBody, 0, 0, 0, 2)
// Partition 0: partition_id(4) + timestamp(8) - earliest
requestBody = append(requestBody, 0, 0, 0, 0) // partition 0
requestBody = append(requestBody, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE) // -2 (earliest)
// Partition 1: partition_id(4) + timestamp(8) - latest
requestBody = append(requestBody, 0, 0, 0, 1) // partition 1
requestBody = append(requestBody, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) // -1 (latest)
- response, err := h.handleListOffsets(correlationID, requestBody)
+ response, err := h.handleListOffsets(correlationID, 0, requestBody)
if err != nil {
t.Fatalf("handleListOffsets: %v", err)
}
if len(response) < 50 { // minimum expected size
t.Fatalf("response too short: %d bytes", len(response))
}
// Check correlation ID
respCorrelationID := binary.BigEndian.Uint32(response[0:4])
if respCorrelationID != correlationID {
t.Errorf("correlation ID: got %d, want %d", respCorrelationID, correlationID)
}
// Check throttle time
throttleTime := binary.BigEndian.Uint32(response[4:8])
if throttleTime != 0 {
t.Errorf("throttle time: got %d, want 0", throttleTime)
}
// Check topics count
topicsCount := binary.BigEndian.Uint32(response[8:12])
if topicsCount != 1 {
t.Errorf("topics count: got %d, want 1", topicsCount)
}
// Check topic name
offset := 12
topicNameSize := binary.BigEndian.Uint16(response[offset : offset+2])
offset += 2
if topicNameSize != uint16(len(topic)) {
t.Errorf("topic name size: got %d, want %d", topicNameSize, len(topic))
}
responseTopic := string(response[offset : offset+int(topicNameSize)])
offset += int(topicNameSize)
if responseTopic != topic {
t.Errorf("topic name: got %s, want %s", responseTopic, topic)
}
// Check partitions count
partitionsCount := binary.BigEndian.Uint32(response[offset : offset+4])
offset += 4
if partitionsCount != 2 {
t.Errorf("partitions count: got %d, want 2", partitionsCount)
}
// Check partition 0 (earliest)
partitionID := binary.BigEndian.Uint32(response[offset : offset+4])
offset += 4
if partitionID != 0 {
t.Errorf("partition 0 ID: got %d, want 0", partitionID)
}
errorCode := binary.BigEndian.Uint16(response[offset : offset+2])
offset += 2
if errorCode != 0 {
t.Errorf("partition 0 error: got %d, want 0", errorCode)
}
timestamp := int64(binary.BigEndian.Uint64(response[offset : offset+8]))
offset += 8
if timestamp <= 0 {
t.Errorf("partition 0 timestamp: got %d, want > 0", timestamp)
}
offsetValue := int64(binary.BigEndian.Uint64(response[offset : offset+8]))
offset += 8
if offsetValue != 0 {
t.Errorf("partition 0 offset: got %d, want 0", offsetValue)
}
// Check partition 1 (latest)
partitionID = binary.BigEndian.Uint32(response[offset : offset+4])
offset += 4
if partitionID != 1 {
t.Errorf("partition 1 ID: got %d, want 1", partitionID)
}
errorCode = binary.BigEndian.Uint16(response[offset : offset+2])
offset += 2
if errorCode != 0 {
t.Errorf("partition 1 error: got %d, want 0", errorCode)
}
timestamp = int64(binary.BigEndian.Uint64(response[offset : offset+8]))
offset += 8
if timestamp <= 0 {
t.Errorf("partition 1 timestamp: got %d, want > 0", timestamp)
}
offsetValue = int64(binary.BigEndian.Uint64(response[offset : offset+8]))
if offsetValue != 0 {
t.Errorf("partition 1 offset: got %d, want 0", offsetValue)
}
}
func TestHandler_ListOffsets_EndToEnd(t *testing.T) {
@@ -504,36 +504,36 @@ func TestHandler_ListOffsets_EndToEnd(t *testing.T) {
correlationID := uint32(555)
clientID := "listoffsets-test"
topic := "my-topic"
message := make([]byte, 0, 128)
message = append(message, 0, 2) // API key 2 (ListOffsets)
message = append(message, 0, 0) // API version 0
// Correlation ID
correlationIDBytes := make([]byte, 4)
binary.BigEndian.PutUint32(correlationIDBytes, correlationID)
message = append(message, correlationIDBytes...)
// Client ID length and string
clientIDLen := uint16(len(clientID))
message = append(message, byte(clientIDLen>>8), byte(clientIDLen))
message = append(message, []byte(clientID)...)
// Topics count (1)
message = append(message, 0, 0, 0, 1)
// Topic name
topicLen := uint16(len(topic))
message = append(message, byte(topicLen>>8), byte(topicLen))
message = append(message, []byte(topic)...)
// Partitions count (1)
message = append(message, 0, 0, 0, 1)
// Partition 0 requesting earliest offset
message = append(message, 0, 0, 0, 0) // partition 0
message = append(message, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE) // -2 (earliest)
// Write message size and data
messageSize := uint32(len(message))
sizeBuf := make([]byte, 4)
@@ -568,19 +568,19 @@ func TestHandler_ListOffsets_EndToEnd(t *testing.T) {
if len(respBuf) < 20 { // minimum response size
t.Fatalf("response too short: %d bytes", len(respBuf))
}
// Check correlation ID
respCorrelationID := binary.BigEndian.Uint32(respBuf[0:4])
if respCorrelationID != correlationID {
t.Errorf("correlation ID mismatch: got %d, want %d", respCorrelationID, correlationID)
}
// Check topics count
topicsCount := binary.BigEndian.Uint32(respBuf[8:12])
if topicsCount != 1 {
t.Errorf("expected 1 topic, got: %d", topicsCount)
}
// Check topic name (skip verification of full response for brevity)
// The important thing is we got a structurally valid response
@@ -616,24 +616,24 @@ func TestHandler_Metadata_EndToEnd(t *testing.T) {
// Create Metadata request
correlationID := uint32(789)
clientID := "metadata-test"
message := make([]byte, 0, 64)
message = append(message, 0, 3) // API key 3 (Metadata)
message = append(message, 0, 0) // API version 0
// Correlation ID
correlationIDBytes := make([]byte, 4)
binary.BigEndian.PutUint32(correlationIDBytes, correlationID)
message = append(message, correlationIDBytes...)
// Client ID length and string
clientIDLen := uint16(len(clientID))
message = append(message, byte(clientIDLen>>8), byte(clientIDLen))
message = append(message, []byte(clientID)...)
// Empty request body (all topics)
message = append(message, 0xFF, 0xFF, 0xFF, 0xFF) // -1 = all topics
// Write message size and data
messageSize := uint32(len(message))
sizeBuf := make([]byte, 4)
@@ -668,13 +668,13 @@ func TestHandler_Metadata_EndToEnd(t *testing.T) {
if len(respBuf) < 40 { // minimum response size
t.Fatalf("response too short: %d bytes", len(respBuf))
}
// Check correlation ID
respCorrelationID := binary.BigEndian.Uint32(respBuf[0:4])
if respCorrelationID != correlationID {
t.Errorf("correlation ID mismatch: got %d, want %d", respCorrelationID, correlationID)
}
// Check brokers count
brokersCount := binary.BigEndian.Uint32(respBuf[8:12])
if brokersCount != 1 {

weed/mq/kafka/protocol/produce_test.go (128 lines changed)

@@ -9,77 +9,77 @@ import (
func TestHandler_handleProduce(t *testing.T) {
h := NewHandler()
correlationID := uint32(333)
// First create a topic
h.topics["test-topic"] = &TopicInfo{
Name: "test-topic",
Partitions: 1,
CreatedAt: time.Now().UnixNano(),
}
// Build a simple Produce request with minimal record
clientID := "test-producer"
topicName := "test-topic"
requestBody := make([]byte, 0, 256)
// Client ID
requestBody = append(requestBody, 0, byte(len(clientID)))
requestBody = append(requestBody, []byte(clientID)...)
// Acks (1 - wait for leader acknowledgment)
requestBody = append(requestBody, 0, 1)
// Timeout (5000ms)
requestBody = append(requestBody, 0, 0, 0x13, 0x88)
// Topics count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Topic name
requestBody = append(requestBody, 0, byte(len(topicName)))
requestBody = append(requestBody, []byte(topicName)...)
// Partitions count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Partition 0
requestBody = append(requestBody, 0, 0, 0, 0) // partition ID
// Record set (simplified - just dummy data)
recordSet := make([]byte, 32)
// Basic record batch header structure for parsing
binary.BigEndian.PutUint64(recordSet[0:8], 0) // base offset
binary.BigEndian.PutUint32(recordSet[8:12], 24) // batch length
binary.BigEndian.PutUint32(recordSet[12:16], 0) // partition leader epoch
recordSet[16] = 2 // magic byte
binary.BigEndian.PutUint32(recordSet[16:20], 1) // record count at offset 16
recordSetSize := uint32(len(recordSet))
requestBody = append(requestBody, byte(recordSetSize>>24), byte(recordSetSize>>16), byte(recordSetSize>>8), byte(recordSetSize))
requestBody = append(requestBody, recordSet...)
- response, err := h.handleProduce(correlationID, requestBody)
+ response, err := h.handleProduce(correlationID, 7, requestBody)
if err != nil {
t.Fatalf("handleProduce: %v", err)
}
if len(response) < 40 { // minimum expected size
t.Fatalf("response too short: %d bytes", len(response))
}
// Check correlation ID
respCorrelationID := binary.BigEndian.Uint32(response[0:4])
if respCorrelationID != correlationID {
t.Errorf("correlation ID: got %d, want %d", respCorrelationID, correlationID)
}
// Check topics count
topicsCount := binary.BigEndian.Uint32(response[4:8])
if topicsCount != 1 {
t.Errorf("topics count: got %d, want 1", topicsCount)
}
// Parse response structure
offset := 8
respTopicNameSize := binary.BigEndian.Uint16(response[offset : offset+2])
@@ -87,45 +87,45 @@ func TestHandler_handleProduce(t *testing.T) {
if respTopicNameSize != uint16(len(topicName)) {
t.Errorf("response topic name size: got %d, want %d", respTopicNameSize, len(topicName))
}
respTopicName := string(response[offset : offset+int(respTopicNameSize)])
offset += int(respTopicNameSize)
if respTopicName != topicName {
t.Errorf("response topic name: got %s, want %s", respTopicName, topicName)
}
// Partitions count
respPartitionsCount := binary.BigEndian.Uint32(response[offset : offset+4])
offset += 4
if respPartitionsCount != 1 {
t.Errorf("response partitions count: got %d, want 1", respPartitionsCount)
}
// Partition response: partition_id(4) + error_code(2) + base_offset(8) + log_append_time(8) + log_start_offset(8)
partitionID := binary.BigEndian.Uint32(response[offset : offset+4])
offset += 4
if partitionID != 0 {
t.Errorf("partition ID: got %d, want 0", partitionID)
}
errorCode := binary.BigEndian.Uint16(response[offset : offset+2])
offset += 2
if errorCode != 0 {
t.Errorf("partition error: got %d, want 0", errorCode)
}
baseOffset := int64(binary.BigEndian.Uint64(response[offset : offset+8]))
offset += 8
if baseOffset < 0 {
t.Errorf("base offset: got %d, want >= 0", baseOffset)
}
// Verify record was added to ledger
ledger := h.GetLedger(topicName, 0)
if ledger == nil {
t.Fatalf("ledger not found for topic-partition")
}
if hwm := ledger.GetHighWaterMark(); hwm <= baseOffset {
t.Errorf("high water mark: got %d, want > %d", hwm, baseOffset)
}
@@ -134,47 +134,47 @@ func TestHandler_handleProduce(t *testing.T) {
func TestHandler_handleProduce_UnknownTopic(t *testing.T) {
h := NewHandler()
correlationID := uint32(444)
// Build Produce request for non-existent topic
clientID := "test-producer"
topicName := "non-existent-topic"
requestBody := make([]byte, 0, 128)
// Client ID
requestBody = append(requestBody, 0, byte(len(clientID)))
requestBody = append(requestBody, []byte(clientID)...)
// Acks (1)
requestBody = append(requestBody, 0, 1)
// Timeout
requestBody = append(requestBody, 0, 0, 0x13, 0x88)
// Topics count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Topic name
requestBody = append(requestBody, 0, byte(len(topicName)))
requestBody = append(requestBody, []byte(topicName)...)
// Partitions count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Partition 0 with minimal record set
requestBody = append(requestBody, 0, 0, 0, 0) // partition ID
recordSet := make([]byte, 32) // dummy record set
binary.BigEndian.PutUint32(recordSet[16:20], 1) // record count
recordSetSize := uint32(len(recordSet))
requestBody = append(requestBody, byte(recordSetSize>>24), byte(recordSetSize>>16), byte(recordSetSize>>8), byte(recordSetSize))
requestBody = append(requestBody, recordSet...)
- response, err := h.handleProduce(correlationID, requestBody)
+ response, err := h.handleProduce(correlationID, 7, requestBody)
if err != nil {
t.Fatalf("handleProduce: %v", err)
}
// Parse response to check for UNKNOWN_TOPIC_OR_PARTITION error
offset := 8 + 2 + len(topicName) + 4 + 4 // skip to error code
errorCode := binary.BigEndian.Uint16(response[offset : offset+2])
@@ -186,65 +186,65 @@ func TestHandler_handleProduce_UnknownTopic(t *testing.T) {
func TestHandler_handleProduce_FireAndForget(t *testing.T) {
h := NewHandler()
correlationID := uint32(555)
// Create a topic
h.topics["test-topic"] = &TopicInfo{
Name: "test-topic",
Partitions: 1,
CreatedAt: time.Now().UnixNano(),
}
// Build Produce request with acks=0 (fire and forget)
clientID := "test-producer"
topicName := "test-topic"
requestBody := make([]byte, 0, 128)
// Client ID
requestBody = append(requestBody, 0, byte(len(clientID)))
requestBody = append(requestBody, []byte(clientID)...)
// Acks (0 - fire and forget)
requestBody = append(requestBody, 0, 0)
// Timeout
requestBody = append(requestBody, 0, 0, 0x13, 0x88)
// Topics count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Topic name
requestBody = append(requestBody, 0, byte(len(topicName)))
requestBody = append(requestBody, []byte(topicName)...)
// Partitions count (1)
requestBody = append(requestBody, 0, 0, 0, 1)
// Partition 0 with record set
requestBody = append(requestBody, 0, 0, 0, 0) // partition ID
recordSet := make([]byte, 32)
binary.BigEndian.PutUint32(recordSet[16:20], 1) // record count
recordSetSize := uint32(len(recordSet))
requestBody = append(requestBody, byte(recordSetSize>>24), byte(recordSetSize>>16), byte(recordSetSize>>8), byte(recordSetSize))
requestBody = append(requestBody, recordSet...)
- response, err := h.handleProduce(correlationID, requestBody)
+ response, err := h.handleProduce(correlationID, 7, requestBody)
if err != nil {
t.Fatalf("handleProduce: %v", err)
}
// For acks=0, should return empty response
if len(response) != 0 {
t.Errorf("fire and forget response: got %d bytes, want 0", len(response))
}
// But record should still be added to ledger
ledger := h.GetLedger(topicName, 0)
if ledger == nil {
t.Fatalf("ledger not found for topic-partition")
}
if hwm := ledger.GetHighWaterMark(); hwm == 0 {
t.Errorf("high water mark: got %d, want > 0", hwm)
}
@@ -252,15 +252,15 @@ func TestHandler_handleProduce_FireAndForget(t *testing.T) {
func TestHandler_parseRecordSet(t *testing.T) {
h := NewHandler()
// Test with valid record set
recordSet := make([]byte, 32)
binary.BigEndian.PutUint64(recordSet[0:8], 0) // base offset
binary.BigEndian.PutUint32(recordSet[8:12], 24) // batch length
binary.BigEndian.PutUint32(recordSet[12:16], 0) // partition leader epoch
recordSet[16] = 2 // magic byte
binary.BigEndian.PutUint32(recordSet[16:20], 3) // record count at correct offset
count, size, err := h.parseRecordSet(recordSet)
if err != nil {
t.Fatalf("parseRecordSet: %v", err)
@@ -271,18 +271,18 @@ func TestHandler_parseRecordSet(t *testing.T) {
if size != int32(len(recordSet)) {
t.Errorf("total size: got %d, want %d", size, len(recordSet))
}
// Test with invalid record set (too small)
invalidRecordSet := []byte{1, 2, 3}
_, _, err = h.parseRecordSet(invalidRecordSet)
if err == nil {
t.Errorf("expected error for invalid record set")
}
// Test with unrealistic record count (should fall back to estimation)
badRecordSet := make([]byte, 32)
binary.BigEndian.PutUint32(badRecordSet[16:20], 999999999) // unrealistic count
count, size, err = h.parseRecordSet(badRecordSet)
if err != nil {
t.Fatalf("parseRecordSet fallback: %v", err)
@@ -290,7 +290,7 @@ func TestHandler_parseRecordSet(t *testing.T) {
if count <= 0 {
t.Errorf("fallback count: got %d, want > 0", count)
}
// Test with small batch (should estimate 1 record)
smallRecordSet := make([]byte, 18) // Just enough for header check
count, size, err = h.parseRecordSet(smallRecordSet)
